{
"source": "1geraldine1/project_landmark_image_recognition",
"score": 2
}
#### File: project_landmark_image_recognition/landmark_train_model/Image_model_train.py
```python
import os
os.environ["CUDA_DEVICE_ORDER"] = 'PCI_BUS_ID'
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from pathlib import Path
import tensorflow as tf
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from matplotlib import pyplot as plt
import numpy as np
# use the public Keras API instead of the private tensorflow.python path
from tensorflow.keras.preprocessing import image_dataset_from_directory
from sklearn.model_selection import train_test_split
import tensorflow_datasets as tfds
from glob import glob
BASE_DIR = Path(__file__).resolve().parent
# 4class = landmark, 72class = image_collected
# data_dir = os.path.join(BASE_DIR, "landmark")
data_dir = os.path.join(BASE_DIR, "image_collected")
data_cropped_dir = os.path.join(BASE_DIR, "cropped_landmark")
test_data_dir = os.path.join(BASE_DIR, "Test")
json_dir = os.path.join(BASE_DIR, "landmark_json")
categories = os.listdir(data_dir)
num_classes = len(categories)
gpus = tf.config.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs, ", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
print(e)
strategy = tf.distribute.MirroredStrategy()
# tf.profiler.experimental.start('./log')
def model_cnn():
img_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255, rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest',
validation_split=0.2)
feature_shape = (None, 128, 128, 3)
label_shape = (None,)
epoch = 50
train_ds = tf.data.Dataset.from_generator(
lambda: img_gen.flow_from_directory(
data_dir, classes=categories,
class_mode='sparse', subset='training', seed=123,
target_size=(128, 128), batch_size=32
),
output_signature=(
tf.TensorSpec(shape=tf.TensorShape(feature_shape), dtype=tf.float16),
tf.TensorSpec(shape=tf.TensorShape(label_shape), dtype=tf.float16)
)
)
val_ds = tf.data.Dataset.from_generator(
lambda: img_gen.flow_from_directory(
data_dir, classes=categories,
class_mode='sparse', subset='validation', seed=123,
target_size=(128, 128), batch_size=32
),
output_signature=(
tf.TensorSpec(shape=tf.TensorShape(feature_shape), dtype=tf.float16),
tf.TensorSpec(shape=tf.TensorShape(label_shape), dtype=tf.float16)
)
)
train_ds = train_ds.prefetch(100 * 32)
val_ds = val_ds.prefetch(100 * 32)
with tf.device('gpu:0'):
model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, (3, 3)),
tf.keras.layers.PReLU(),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(32, (3, 3)),
tf.keras.layers.PReLU(),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Conv2D(32, (3, 3)),
tf.keras.layers.PReLU(),
tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64),
tf.keras.layers.PReLU(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(num_classes),
tf.keras.layers.Softmax()
])
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# Model.fit accepts tf.data datasets directly; fit_generator is deprecated
history = model.fit(
train_ds,
epochs=epoch,
# steps_per_epoch = number of training samples / batch_size => 10259/32 => rounded up to 321
steps_per_epoch=321,
validation_data=val_ds,
# validation_steps = number of validation samples / batch_size => 2563/32 => rounded up to 81
validation_steps=81
)
model.save('model01_noncrop.h5')
plot_model(history, epoch)
def model_efficientnet():
image_size = 600
batch_size = 16
ds = image_dataset_from_directory(data_dir,
label_mode='categorical',
class_names=categories,
shuffle=True,
batch_size=batch_size,
image_size=(image_size, image_size)
)
num_batches = len(ds)  # known cardinality; avoids materializing the whole dataset
train_size = int(0.7 * num_batches)
test_size = int(0.15 * num_batches)
ds = ds.shuffle(buffer_size=num_batches)
train_ds = ds.take(train_size)
test_ds = ds.skip(train_size)
val_ds = test_ds.skip(test_size)
test_ds = test_ds.take(test_size)
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = test_ds.cache().prefetch(buffer_size=AUTOTUNE)
data_augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
tf.keras.layers.experimental.preprocessing.RandomTranslation(0.2, 0.2, fill_mode='constant')
], name='data_augmentation')
preprocess_input = tf.keras.applications.efficientnet.preprocess_input
with strategy.scope():
inputs = tf.keras.layers.Input(shape=(image_size, image_size, 3))
x = preprocess_input(inputs)
x = data_augmentation(x)
IMG_SHAPE = (image_size, image_size) + (3,)
base_model = tf.keras.applications.EfficientNetB7(input_shape=IMG_SHAPE,
input_tensor=x,
include_top=False,
weights='imagenet')
base_model.trainable = False
x = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)
x = tf.keras.layers.BatchNormalization()(x)
top_dropout_rate = 0.2
x = tf.keras.layers.Dropout(top_dropout_rate)(x)
outputs = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
model = tf.keras.Model(inputs, outputs)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
model.summary()
with strategy.scope():
loss0, accuracy0 = model.evaluate(val_ds)
print('initial loss : {:.2f}'.format(loss0))
print('initial accuracy: {:.1f}%'.format(accuracy0 * 100))
initial_epochs = 50
filepath = './checkpoint/full_data_v3_initial.h5'
checkpoint = ModelCheckpoint(filepath=filepath, mode='max', monitor='val_accuracy', verbose=1, save_best_only=True)
callbacks_list = [checkpoint]
tf.debugging.set_log_device_placement(True)
with strategy.scope():
history = model.fit(train_ds,
epochs=initial_epochs,
validation_data=val_ds,
callbacks=callbacks_list)
# model performance check
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()), 1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0, 1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
# plt.show()
# fine tuning - unfreeze model
for layer in model.layers[-50:]:
if not isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = True
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# fine tuning - training
fine_tune_epochs = 50
total_epochs = initial_epochs + fine_tune_epochs
filepath = './checkpoint/finetune_v3.h5'
checkpoint = ModelCheckpoint(filepath=filepath, mode='max', monitor='val_accuracy', verbose=1, save_best_only=True)
callbacks_list = [checkpoint]
history_fine = model.fit(train_ds,
epochs=fine_tune_epochs,
validation_data=val_ds,
callbacks=callbacks_list)
# model performance check2
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.5, 1])
plt.plot([initial_epochs - 1, initial_epochs - 1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs - 1, initial_epochs - 1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
# plt.show()
# model.load_weights('./checkpoint/full_data_finetune.h5')
# model test
loss, accuracy = model.evaluate(test_ds)
print('Test accuracy :', accuracy)
model.save('efficientnetB7_full_data_train_done_v3')
import pickle
# Define the save names
base_filepath = './trained_model/efficientnetB7_full_data_trained_v3'
json_filepath = base_filepath + '.json'
weights_filepath = base_filepath + '.h5'
pkl_filepath = base_filepath + '.pkl'
# save model and weights
model_json = model.to_json()
with open(json_filepath, "w") as json_file:
json_file.write(model_json)
model.save_weights(weights_filepath)
# save history
with open(pkl_filepath, 'wb') as history_file:
pickle.dump(history.history, history_file)
# pickle.dump(history_file)
def plot_model(history, epoch):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epoch)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
def model_efficientnet_reduce_time():
files = tf.data.Dataset.list_files(str(data_dir + '\\*\\*'), shuffle=False)
num_files = len([file for file in glob(str(data_dir + '\\*\\*'))])
print(num_files)
class_names = np.array(sorted(categories))
print(class_names)
image_size = 600
batch_size = 16
preprocess_input = tf.keras.applications.efficientnet.preprocess_input
def get_label(file_path):
parts = tf.strings.split(file_path, os.path.sep)
one_hot = parts[-2] == class_names
return tf.argmax(tf.cast(one_hot, tf.int32))
def decode_img(img):
img = tf.image.decode_jpeg(img, channels=3)
return tf.image.resize(img, [image_size, image_size])
def process_TL(file_path):
label = get_label(file_path)
img = tf.io.read_file(file_path)
img = decode_img(img)
img = preprocess_input(img)
return img, label
AUTOTUNE = tf.data.AUTOTUNE
# shuffle the list of file paths once, then decode and preprocess each image;
# interleaving a fresh list_files() per element would duplicate the whole dataset
ds = files.shuffle(num_files, reshuffle_each_iteration=False).map(process_TL, num_parallel_calls=AUTOTUNE)
train_size = int(0.8 * num_files)
val_size = int(0.2 * num_files)
train_ds = ds.take(train_size)
val_ds = ds.skip(train_size)
train_ds = train_ds.repeat().batch(batch_size).prefetch(AUTOTUNE)
val_ds = val_ds.batch(batch_size).prefetch(AUTOTUNE)
data_augmentation = tf.keras.Sequential([
tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
tf.keras.layers.experimental.preprocessing.RandomRotation(0.2),
tf.keras.layers.experimental.preprocessing.RandomTranslation(0.2, 0.2, fill_mode='constant')
], name='data_augmentation')
def create_model():
input_layer = tf.keras.layers.Input(shape=(image_size, image_size, 3))
x = data_augmentation(input_layer)
base_model = tf.keras.applications.EfficientNetB7(input_tensor=x,
include_top=False,
weights='imagenet')
base_model.trainable = False
x = tf.keras.layers.GlobalAveragePooling2D()(base_model.output)
x = tf.keras.layers.BatchNormalization()(x)
top_dropout_rate = 0.2
x = tf.keras.layers.Dropout(top_dropout_rate)(x)
outputs = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
model = tf.keras.Model(input_layer, outputs)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
return model
model = create_model()
model.summary()
filepath = './checkpoint/full_data_v3_initial.h5'
checkpoint = ModelCheckpoint(filepath=filepath, mode='max', monitor='val_accuracy', verbose=1, save_best_only=True,
save_weights_only=False)
class MyThresholdCallback(tf.keras.callbacks.Callback):
def __init__(self, threshold):
super(MyThresholdCallback, self).__init__()
self.threshold = threshold
def on_epoch_end(self, epoch, logs=None):
val_acc = logs["val_accuracy"]
if val_acc >= self.threshold:
self.model.stop_training = True
my_callback = MyThresholdCallback(threshold=0.99)
history = model.fit(train_ds,
steps_per_epoch=int(train_size / batch_size),
validation_data=val_ds,
validation_steps=int(val_size / batch_size),
callbacks=[my_callback],
epochs=50)
model_efficientnet()
```
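The hard-coded `steps_per_epoch=321` and `validation_steps=81` above are the ceiling of the sample count divided by the batch size, as noted in the comments. A minimal sketch of computing them instead of hard-coding them (the counts are taken from the comments; in practice they could come from the generator's `samples` attribute):
```python
import math

batch_size = 32
num_train_samples = 10259  # taken from the comment above; normally train_generator.samples
num_val_samples = 2563     # taken from the comment above; normally val_generator.samples

steps_per_epoch = math.ceil(num_train_samples / batch_size)   # -> 321
validation_steps = math.ceil(num_val_samples / batch_size)    # -> 81
print(steps_per_epoch, validation_steps)
```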
#### File: project_landmark_image_recognition/landmark_train_model/Image_To_Dataset.py
```python
import os
from pathlib import Path
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
import json
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import tensorboard
import Tools
BASE_DIR = Path(__file__).resolve().parent
data_dir = os.path.join(BASE_DIR, "landmark")
json_dir = os.path.join(BASE_DIR, "landmark_json")
cropped_data_dir = os.path.join(BASE_DIR,'cropped_landmark')
categories = os.listdir(data_dir)
num_classes = len(categories)
def create_dataset_noncrop_np():
image_size = 128
X = []
Y = []
for idx, category in enumerate(categories):
label = [0 for i in range(num_classes)]
label[idx] = 1
category_dir = os.path.join(data_dir, category)
for top, dir, f in os.walk(category_dir):
for filename in f:
img_dir = os.path.join(category_dir, filename)
print(img_dir)
ff = np.fromfile(img_dir, np.uint8)
img = cv2.imdecode(ff, cv2.IMREAD_UNCHANGED)
img = cv2.resize(img, None, fx=image_size / img.shape[1], fy=image_size / img.shape[0])
X.append(img / 256)
Y.append(label)
X = np.array(X)
Y = np.array(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
xy = (X_train, X_test, Y_train, Y_test)
np.savez("./img_data_noncrop.npz", xy)
def create_image_data_crop():
for idx, category in enumerate(categories):
label = [0 for i in range(num_classes)]
label[idx] = 1
category_dir = os.path.join(data_dir, category)
category_json_dir = os.path.join(json_dir, category)
cropped_save_dir = os.path.join(cropped_data_dir,category)
print(cropped_save_dir)
for top, dir, f in os.walk(category_dir):
for filename in f:
# set the paths of the files to load
img_dir = os.path.join(category_dir, filename)
img_json_dir = os.path.join(category_json_dir, filename[:-4] + '.json')
# load the JSON annotation file
with open(img_json_dir, "r", encoding='UTF-8') as j:
img_json = json.load(j)
# in most cases, lx < rx and ly < ry
lx, ly, rx, ry = img_json['regions'][0]['boxcorners']
print(img_dir)
# workaround for paths that contain Korean (non-ASCII) characters
ff = np.fromfile(img_dir, np.uint8)
img = cv2.imdecode(ff, cv2.IMREAD_UNCHANGED)
# crop the image
crop_img = img[ly:ry, lx:rx]
# Exception handling: occasionally the data has lx > rx, ly > ry.
# In that case crop_img.shape was observed to become [0, 0, 3].
if crop_img.shape[0] == 0:
crop_img = img[ry:ly, rx:lx]
print(crop_img.shape)
# save the cropped image as-is
img = crop_img
# handle Korean (non-ASCII) characters in the output path
extension = os.path.splitext(filename)[1]
result, encoded_img = cv2.imencode(extension,img)
# if encoding succeeded, save to the file path
if result:
with open(os.path.join(cropped_save_dir,filename), mode="w+b") as f:
encoded_img.tofile(f)
create_image_data_crop()
```
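The `np.fromfile`/`cv2.imdecode` and `cv2.imencode`/`tofile` calls above are the common workaround for OpenCV's trouble with non-ASCII (e.g. Korean) paths on Windows. A small sketch of the same pattern wrapped into two hypothetical helpers:
```python
import os
import cv2
import numpy as np

def imread_unicode(path: str, flags: int = cv2.IMREAD_UNCHANGED) -> np.ndarray:
    # read the raw bytes first, then let OpenCV decode them in memory
    raw = np.fromfile(path, dtype=np.uint8)
    return cv2.imdecode(raw, flags)

def imwrite_unicode(path: str, image: np.ndarray) -> bool:
    # encode in memory, then write the bytes with plain Python file I/O
    extension = os.path.splitext(path)[1]
    ok, encoded = cv2.imencode(extension, image)
    if ok:
        with open(path, mode="w+b") as f:
            encoded.tofile(f)
    return ok
```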
{
"source": "1gog/clickhouse-mysql-data-reader",
"score": 3
}
#### File: clickhouse_mysql/converter/chwriteconverter.py
```python
from clickhouse_mysql.converter.converter import Converter
import datetime
import decimal
import logging
class CHWriteConverter(Converter):
# do not include empty columns into converted row
delete_empty_columns = False
types_to_convert = [
datetime.timedelta,
bytes,
decimal.Decimal,
# jsonify
# object,
dict,
list,
# set - how to migrate MySQL's `set` type and tell it from `json` type - both of which are presented as `dict`?
set,
]
def __init__(self, column_skip):
logging.debug("CHWriteConverter __init__()")
super().__init__(column_skip=column_skip)
def column(self, column, value):
for _type in self.types_to_convert:
if isinstance(value, _type):
# print("Converting column", column, "of type", type(event.row[column]), event.row[column])
return str(value)
# print("Using asis column", column, "of type", type(event.row[column]))
return value
def row(self, row):
"""
Convert row
:param row: row to convert
:return: converted row
"""
if row is None:
return None
# init list of columns to delete
# copy so that appending empty columns does not permanently extend column_skip
columns_to_delete = list(self.column_skip)
for column in row:
# skip columns already prepared for deletion
if column in columns_to_delete:
continue
# convert column
row[column] = self.column(column, row[column])
# include empty column to the list of to be deleted columns
if (row[column] is None) and self.delete_empty_columns:
columns_to_delete.append(column)
# delete columns according to the list of columns to delete
for column in columns_to_delete:
row.pop(column)
return row
```
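A minimal usage sketch for `CHWriteConverter`, assuming the base `Converter` stores `column_skip` as a list of column names; the row and its column names are made up for illustration:
```python
import datetime
import decimal

from clickhouse_mysql.converter.chwriteconverter import CHWriteConverter

converter = CHWriteConverter(column_skip=["ignored_column"])

# hypothetical row; the column names are made up for illustration
row = {
    "id": 1,
    "price": decimal.Decimal("9.99"),         # converted to str
    "elapsed": datetime.timedelta(hours=2),   # converted to str
    "ignored_column": "dropped",              # removed because it is listed in column_skip
}
print(converter.row(row))
# e.g. {'id': 1, 'price': '9.99', 'elapsed': '2:00:00'}
```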
#### File: clickhouse-mysql-data-reader/clickhouse_mysql/daemon.py
```python
import os
import sys
import atexit
import signal
class Daemon(object):
pidfile = None
root = '/'
def __init__(self, pidfile='/tmp/daemon.pid', root='/'):
self.pidfile = pidfile
self.root = root
def background(self):
# first fork
# root process waits for the child in order not to have zombies in the system
pid = os.fork()
if pid > 0:
# parent - root process wait for first child and exits
os.wait()
sys.exit(0)
# first child
# setup own environment
os.chdir(self.root)
os.umask(0)
os.setsid()
# second fork
# first-fork child produces the real worker process and exits
# first-fork child is being waited now by root process
pid = os.fork()
if pid > 0:
sys.exit(0)
# worker
signal.signal(signal.SIGINT, self.shutdown)
signal.signal(signal.SIGTERM, self.shutdown)
# handle pid file
atexit.register(self.delete_pidfile)
self.write_pidfile()
# handle streams
self.redirect_std_streams()
def shutdown(self, signum=None, frame=None):
self.delete_pidfile()
sys.exit(0)
def redirect_std_streams(self):
sys.stdout.flush()
sys.stderr.flush()
stdin = open(os.devnull, 'r')
stdout = open(os.devnull, 'a+')
stderr = open(os.devnull, 'a+')
os.dup2(stdin.fileno(), sys.stdin.fileno())
os.dup2(stdout.fileno(), sys.stdout.fileno())
os.dup2(stderr.fileno(), sys.stderr.fileno())
def write_pidfile(self):
pid = str(os.getpid())
with open(self.pidfile, 'w+') as f:
f.write(pid)
def delete_pidfile(self):
try:
os.remove(self.pidfile)
except OSError:
pass
def get_pid(self):
try:
with open(self.pidfile, 'r') as pf:
pid = int(pf.read().strip())
except (OSError, ValueError):
pid = None
return pid
def start(self):
pid = self.get_pid()
if pid:
return False
self.background()
self.run()
def stop(self, sig=signal.SIGTERM):
pid = self.get_pid()
if not pid:
return False
try:
os.kill(pid, sig)
except OSError as err:
estr = str(err.args)
if estr.find("No such process") > 0:
self.delete_pidfile()
def restart(self):
self.stop()
self.start()
def run(self):
pass
```
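A minimal sketch of how `Daemon` is meant to be used: subclass it, override `run()`, and drive it via `start()`/`stop()`. The pid file path is an example:
```python
import time

from clickhouse_mysql.daemon import Daemon

class MyWorker(Daemon):
    def run(self):
        # the actual worker loop, running inside the daemonized process
        while True:
            time.sleep(10)

if __name__ == '__main__':
    worker = MyWorker(pidfile='/tmp/my_worker.pid')
    worker.start()   # forks twice, writes the pid file, then calls run()
    # later, typically from a separate invocation:
    # MyWorker(pidfile='/tmp/my_worker.pid').stop()   # sends SIGTERM to the recorded pid
```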
{
"source": "1grasse/conflowgen",
"score": 3
}
#### File: conflowgen/api/container_length_distribution_manager.py
```python
from typing import Dict
from conflowgen.domain_models.distribution_repositories.container_length_distribution_repository import \
ContainerLengthDistributionRepository
from conflowgen.domain_models.data_types.container_length import ContainerLength
class ContainerLengthDistributionManager:
"""
This manager provides the interface to set and get the container length distribution.
The default distribution is presented in the section
`Container Length Distribution <notebooks/input_distributions.ipynb#Container-Length-Distribution>`_.
"""
def __init__(self):
self.container_length_repository = ContainerLengthDistributionRepository()
def get_container_length_distribution(self) -> Dict[ContainerLength, float]:
"""
Returns:
The distribution of container lengths. Each length is assigned its frequency of showing up.
"""
return self.container_length_repository.get_distribution()
def set_container_length_distribution(
self,
container_lengths: Dict[ContainerLength, float]
) -> None:
"""
Set the assumed global distribution of container lengths. This is applied to all vehicles that arrive at the
terminal.
Args:
container_lengths: The distribution of container lengths and their corresponding frequency.
"""
self.container_length_repository.set_distribution(container_lengths)
```
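A short usage sketch for the manager above, assuming a ConFlowGen database has already been created and opened; the frequencies are illustrative and must cover every `ContainerLength` member and sum to 1:
```python
from conflowgen.api.container_length_distribution_manager import ContainerLengthDistributionManager
from conflowgen.domain_models.data_types.container_length import ContainerLength

manager = ContainerLengthDistributionManager()

# illustrative frequencies; every ContainerLength member needs an entry
manager.set_container_length_distribution({
    ContainerLength.twenty_feet: 0.4,
    ContainerLength.forty_feet: 0.55,
    ContainerLength.forty_five_feet: 0.04,
    ContainerLength.other: 0.01,
})
print(manager.get_container_length_distribution())
```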
#### File: conflowgen/api/container_storage_requirement_distribution_manager.py
```python
from typing import Dict
from conflowgen.domain_models.data_types.storage_requirement import StorageRequirement
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.distribution_repositories.container_storage_requirement_distribution_repository import \
ContainerStorageRequirementDistributionRepository
class ContainerStorageRequirementDistributionManager:
"""
This manager provides the interface to set and get the storage requirement distribution.
"""
def __init__(self):
self.storage_requirement_repository = ContainerStorageRequirementDistributionRepository()
def get_storage_requirement_distribution(self) -> Dict[ContainerLength, Dict[StorageRequirement, float]]:
"""
Returns:
The distribution of storage requirements based on the length of the container.
"""
return self.storage_requirement_repository.get_distribution()
def set_storage_requirement_distribution(
self,
storage_requirements: Dict[ContainerLength, Dict[StorageRequirement, float]]
) -> None:
"""
Set the assumed global distribution of container storage requirements. This is applied to all vehicles that
arrive at the terminal.
Args:
storage_requirements: The distribution of storage requirements depending on the container length.
"""
self.storage_requirement_repository.set_distribution(storage_requirements)
```
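The storage requirement distribution is nested: one inner distribution per `ContainerLength`. A sketch with illustrative values, again assuming an open database; the `StorageRequirement` member names used here are assumptions and should be checked against the enum definition:
```python
from conflowgen.api.container_storage_requirement_distribution_manager import \
    ContainerStorageRequirementDistributionManager
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.data_types.storage_requirement import StorageRequirement

manager = ContainerStorageRequirementDistributionManager()

# the same inner distribution is reused for every container length purely for brevity;
# the StorageRequirement member names are assumed, not taken from this file
per_length = {
    StorageRequirement.standard: 0.8,
    StorageRequirement.empty: 0.1,
    StorageRequirement.reefer: 0.05,
    StorageRequirement.dangerous_goods: 0.05,
}
manager.set_storage_requirement_distribution({
    length: dict(per_length) for length in ContainerLength
})
```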
#### File: conflowgen/api/port_call_manager.py
```python
from __future__ import annotations
import datetime
from typing import List, Tuple, Optional
from conflowgen.domain_models.factories.schedule_factory import ScheduleFactory
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
class PortCallManager:
"""
This manager provides the interface to create schedules for services that periodically call the container terminal,
e.g. ships of any size and trains. This explicitly does not cover the trucks which arrive according to a
probability distribution set at
:class:`.TruckArrivalDistributionManager`.
"""
def __init__(self):
self.schedule_factory = ScheduleFactory()
def add_large_scheduled_vehicle(
self,
vehicle_type: ModeOfTransport,
service_name: str,
vehicle_arrives_at: datetime.date,
vehicle_arrives_at_time: datetime.time,
average_vehicle_capacity: int,
average_moved_capacity: int,
next_destinations: Optional[List[Tuple[str, float]]] = None,
vehicle_arrives_every_k_days: Optional[int] = None
) -> None:
"""
Add the schedule of a ship of any size or a train. The concrete vehicle instances are automatically generated
when :meth:`.ContainerFlowGenerationManager.generate` is invoked.
Args:
vehicle_type: One of
:class:`ModeOfTransport.deep_sea_vessel`,
:class:`ModeOfTransport.feeder`,
:class:`ModeOfTransport.barge`, or
:class:`ModeOfTransport.train`
service_name:
The name of the service, i.e. the shipping line or rail freight line
vehicle_arrives_at:
A date the service would arrive at the terminal. This can e.g. point at the week day for weekly
services. In any case, this is combined with the parameter ``vehicle_arrives_every_k_days`` and only
arrivals within the time scope between ``start_date`` and ``end_date`` are considered.
vehicle_arrives_at_time:
A time at the day (between 00:00 and 23:59).
average_vehicle_capacity:
Number of TEU that can be transported with the vehicle at most.
average_moved_capacity:
Number of TEU which is imported.
next_destinations:
Pairs of destination and frequency of the destination being chosen.
vehicle_arrives_every_k_days:
Defaults to weekly services (arrival every 7 days). Other frequencies are possible as well.
In the special case of ``-1``, only a single arrival at the day ``vehicle_arrives_at`` is scheduled.
This arrival is only part of the generated container flow if that arrival lies between ``start_date``
and ``end_date``.
"""
assert vehicle_type in ModeOfTransport.get_scheduled_vehicles(), f"Vehicle of type {vehicle_type} is not " \
f"suitable as is does not periodically arrive."
self.schedule_factory.add_schedule(
vehicle_type=vehicle_type,
service_name=service_name,
vehicle_arrives_at=vehicle_arrives_at,
vehicle_arrives_at_time=vehicle_arrives_at_time,
average_vehicle_capacity=average_vehicle_capacity,
average_moved_capacity=average_moved_capacity,
next_destinations=next_destinations,
vehicle_arrives_every_k_days=vehicle_arrives_every_k_days
)
def has_schedule(
self,
service_name: str,
vehicle_type: ModeOfTransport
) -> bool:
"""
Args:
service_name: The name of the service which moves to a schedule that is sought for.
vehicle_type: The mode of transport to restrict the search to.
Returns:
Whether the requested schedule already exists in the database.
"""
assert vehicle_type in ModeOfTransport.get_scheduled_vehicles(), f"Vehicle of type {vehicle_type} not " \
f"suitable for this method."
return self.schedule_factory.get_schedule(service_name, vehicle_type) is not None
```
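A usage sketch for the schedule API above, assuming an open database; the service name, dates, capacities, and destinations are illustrative:
```python
import datetime

from conflowgen.api.port_call_manager import PortCallManager
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport

port_call_manager = PortCallManager()
service_name = "example feeder line"  # illustrative name

if not port_call_manager.has_schedule(service_name, vehicle_type=ModeOfTransport.feeder):
    port_call_manager.add_large_scheduled_vehicle(
        vehicle_type=ModeOfTransport.feeder,
        service_name=service_name,
        vehicle_arrives_at=datetime.date(2021, 7, 9),
        vehicle_arrives_at_time=datetime.time(11, 0),
        average_vehicle_capacity=800,    # TEU the vessel can carry at most
        average_moved_capacity=100,      # TEU moved per call
        next_destinations=[("DEHAM", 0.5), ("NLRTM", 0.5)],  # destination/frequency pairs
        vehicle_arrives_every_k_days=7,  # weekly service
    )
```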
#### File: application/repositories/container_flow_generation_properties_repository.py
```python
from conflowgen.application.models.container_flow_generation_properties import ContainerFlowGenerationProperties
class DuplicatedContainerFlowGenerationPropertiesEntryException(Exception):
pass
class InvalidTimeRangeException(Exception):
pass
class MinimumNotStrictlySmallerThanMaximumException(Exception):
pass
class ContainerFlowGenerationPropertiesRepository:
@staticmethod
def _verify(properties) -> None:
if properties.end_date < properties.start_date:
raise InvalidTimeRangeException(
f"start date '{properties.start_date}' is later than end date '{properties.end_date}'"
)
if (properties.minimum_dwell_time_of_import_containers_in_hours
>= properties.maximum_dwell_time_of_import_containers_in_hours):
raise MinimumNotStrictlySmallerThanMaximumException(
f"{properties.minimum_dwell_time_of_import_containers_in_hours} "
f">= {properties.maximum_dwell_time_of_import_containers_in_hours}"
)
if (properties.minimum_dwell_time_of_export_containers_in_hours
>= properties.maximum_dwell_time_of_export_containers_in_hours):
raise MinimumNotStrictlySmallerThanMaximumException(
f"{properties.minimum_dwell_time_of_export_containers_in_hours} "
f">= {properties.maximum_dwell_time_of_export_containers_in_hours}"
)
@staticmethod
def get_container_flow_generation_properties() -> ContainerFlowGenerationProperties:
all_properties = ContainerFlowGenerationProperties.select().execute()
number_found_rows = len(all_properties)
if not (0 <= number_found_rows <= 1):
raise DuplicatedContainerFlowGenerationPropertiesEntryException(
f"Number of found rows were {number_found_rows} but expected only one entry"
)
if len(all_properties) == 1:
return all_properties[0]
return ContainerFlowGenerationProperties.create()
@classmethod
def set_container_flow_generation_properties(cls, properties: ContainerFlowGenerationProperties) -> None:
cls._verify(properties)
properties.save()
number_properties_entries: int = ContainerFlowGenerationProperties().select().count()
if number_properties_entries > 1:
raise DuplicatedContainerFlowGenerationPropertiesEntryException(
f"Number of updated rows were {number_properties_entries} but expected only one entry"
)
```
#### File: conflowgen/database_connection/sqlite_database_connection.py
```python
import logging
import os
from typing import List, Tuple, Optional
from peewee import SqliteDatabase
from conflowgen.database_connection.create_tables import create_tables
from conflowgen.domain_models.base_model import database_proxy
from conflowgen.domain_models.distribution_seeders import seed_all_distributions
class SqliteDatabaseIsMissingException(Exception):
pass
class SqliteDatabaseAlreadyExistsException(Exception):
pass
class AmbiguousParameterException(Exception):
pass
class SqliteDatabaseConnection:
"""
The SQLite database stores all content from the API calls to enable reproducible results.
See :class:`.DatabaseChooser` for more information.
"""
SQLITE_DEFAULT_SETTINGS = {
# compare with recommended settings from
# https://docs.peewee-orm.com/en/latest/peewee/database.html
'journal_mode': 'wal',
'cache_size': -32 * 1024, # counted in KiB, thus this means 32 MB cache
'foreign_keys': 1,
'ignore_check_constraints': 0,
'synchronous': 0
}
SQLITE_DEFAULT_DIR = os.path.abspath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
"data",
"databases"
)
)
def __init__(self, sqlite_databases_directory: Optional[str] = None):
if sqlite_databases_directory is None:
sqlite_databases_directory = self.SQLITE_DEFAULT_DIR
sqlite_databases_directory = os.path.abspath(sqlite_databases_directory)
self.sqlite_databases_directory = sqlite_databases_directory
self.logger = logging.getLogger("conflowgen")
if not os.path.isdir(self.sqlite_databases_directory):
self.logger.debug(f"Creating SQLite directory at {sqlite_databases_directory}")
os.makedirs(self.sqlite_databases_directory, exist_ok=True)
self.sqlite_db_connection = None
def list_all_sqlite_databases(self) -> List[str]:
sqlite_databases = [
_file for _file
in os.listdir(self.sqlite_databases_directory)
if _file.endswith(".sqlite")
]
return sqlite_databases
def choose_database(
self,
database_name: str,
create: bool = False,
reset: bool = False,
**seeder_options
) -> SqliteDatabase:
if database_name == ":memory:":
path_to_sqlite_database = ":memory:"
sqlite_database_existed_before = False
else:
path_to_sqlite_database, sqlite_database_existed_before = self._load_or_create_sqlite_file_on_hard_drive(
database_name=database_name, create=create, reset=reset
)
self.logger.debug(f"Opening file {path_to_sqlite_database}")
self.sqlite_db_connection = SqliteDatabase(
path_to_sqlite_database,
pragmas=self.SQLITE_DEFAULT_SETTINGS
)
database_proxy.initialize(self.sqlite_db_connection)
self.sqlite_db_connection.connect()
self.logger.debug(f'journal_mode: {self.sqlite_db_connection.journal_mode}')
self.logger.debug(f'cache_size: {self.sqlite_db_connection.cache_size}')
self.logger.debug(f'page_size: {self.sqlite_db_connection.page_size}')
self.logger.debug(f'foreign_keys: {self.sqlite_db_connection.foreign_keys}')
if not sqlite_database_existed_before:
self.logger.debug(f"Creating new database: '{path_to_sqlite_database}'")
create_tables(self.sqlite_db_connection)
self.logger.debug("Seed with default values...")
seed_all_distributions(**seeder_options)
else:
self.logger.debug(f"Open existing database: '{path_to_sqlite_database}'")
return self.sqlite_db_connection
def delete_database(self, database_name: str) -> None:
path_to_sqlite_database = self._get_path_to_database(database_name)
if os.path.isfile(path_to_sqlite_database):
self.logger.debug(f"Deleting database at {path_to_sqlite_database}")
os.remove(path_to_sqlite_database)
else:
raise SqliteDatabaseIsMissingException(path_to_sqlite_database)
def _load_or_create_sqlite_file_on_hard_drive(
self, database_name: str, create: bool, reset: bool
) -> Tuple[str, bool]:
path_to_sqlite_database = self._get_path_to_database(database_name)
sqlite_database_existed_before = os.path.isfile(path_to_sqlite_database)
if sqlite_database_existed_before:
if create and not reset:
raise SqliteDatabaseAlreadyExistsException(path_to_sqlite_database)
if reset:
self.logger.debug(f"Deleting old database at {path_to_sqlite_database}")
os.remove(path_to_sqlite_database)
# the file is gone, so tables need to be re-created and re-seeded below
sqlite_database_existed_before = False
else:
if not create:
raise SqliteDatabaseIsMissingException(path_to_sqlite_database)
if create:
self.logger.debug(f"No previous database detected, creating new at {path_to_sqlite_database}")
return path_to_sqlite_database, sqlite_database_existed_before
def _get_path_to_database(self, database_name: str) -> str:
return os.path.join(
self.sqlite_databases_directory,
database_name
)
```
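A short sketch of using the connection class directly (the `DatabaseChooser` mentioned in the docstring is the usual entry point); the database file name is an example:
```python
from conflowgen.database_connection.sqlite_database_connection import SqliteDatabaseConnection

connection = SqliteDatabaseConnection()          # uses SQLITE_DEFAULT_DIR
print(connection.list_all_sqlite_databases())    # e.g. ['demo.sqlite']

# create a new database (tables are created and default distributions are seeded),
# or reset an existing one of the same name
db = connection.choose_database("demo.sqlite", create=True, reset=True)

# an in-memory database is also supported and never touches the file system
in_memory_db = SqliteDatabaseConnection().choose_database(":memory:", create=True)
```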
#### File: domain_models/data_types/container_length.py
```python
from __future__ import annotations
import enum
import enum_tools.documentation
@enum_tools.documentation.document_enum
class ContainerLength(enum.Enum):
"""
The container length is one of the most important factors of how much space a container occupies. Here, the most
common container sizes (neglecting height) are represented.
"""
twenty_feet = 20 # doc: A twenty-foot container
forty_feet = 40 # doc: A forty-foot container
forty_five_feet = 45 # doc: A forty-five-foot container
other = -1 # doc: Any other length usually does not fit into the standardized slots and handling processes.
@classmethod
def get_factor(cls, container_length: ContainerLength) -> float:
"""
Each container occupies a certain amount of space when stored which is expressed in TEU.
.. note::
.. autodata:: conflowgen.domain_models.data_types.container_length.CONTAINER_LENGTH_TO_OCCUPIED_TEU
Args:
container_length: The length of the container
Returns:
The TEU factor of the container
"""
return CONTAINER_LENGTH_TO_OCCUPIED_TEU[container_length]
def __str__(self) -> str:
"""
The textual representation is e.g. '20 feet' instead of '<ContainerLength.twenty_feet>' so it is easier to read
in the logs.
"""
if self.value > 0:
return f"{self.value} feet"
return "other"
@classmethod
def cast_element_type(cls, text: str) -> ContainerLength | None:
"""
Args:
text: The text to parse
Returns:
The container length enum member if the cast was successful, ``None`` otherwise.
"""
if text == "other":
return cls.other
feet_suffix = " feet"
if text.endswith(feet_suffix):
number_part = text[:-len(feet_suffix)]
if not number_part.isdigit():
return None
casted_number_part = int(number_part)
return cls(casted_number_part)
return None
CONTAINER_LENGTH_TO_OCCUPIED_TEU = {
ContainerLength.twenty_feet: 1,
ContainerLength.forty_feet: 2,
ContainerLength.forty_five_feet: 2.25,
ContainerLength.other: 2.5
}
"""
This is the translation table to specify the occupied storage space in TEU.
For 20', 40', and 45', the typical values are picked.
The TEU factor for the value 'other' is chosen to be rather large because it is assumed to be difficult to find a proper
storage position.
"""
```
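A few example interactions with the enum above, illustrating the TEU factor table and the parsing helper:
```python
from conflowgen.domain_models.data_types.container_length import ContainerLength

print(str(ContainerLength.forty_feet))                               # '40 feet'
print(ContainerLength.get_factor(ContainerLength.forty_five_feet))   # 2.25 (TEU)

# cast_element_type parses the textual representation back into the enum
assert ContainerLength.cast_element_type("20 feet") is ContainerLength.twenty_feet
assert ContainerLength.cast_element_type("other") is ContainerLength.other
assert ContainerLength.cast_element_type("not a length") is None
```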
#### File: domain_models/distribution_repositories/__init__.py
```python
import logging
import math
from typing import Dict, Any
logger = logging.getLogger("conflowgen")
def normalize_nested_distribution(distributions: Dict[Any, Dict[Any, float]]) -> Dict[Any, Dict[Any, float]]:
normalized_distributions = {}
for first_key, distribution in distributions.items():
second_keys, fractions = zip(*distribution.items())
sum_of_fractions = sum(fractions)
if not math.isclose(sum_of_fractions, 1):
logger.debug(f"Sum of fractions was not 1 for '{first_key}' and was automatically normalized.")
fractions = [fraction / sum_of_fractions for fraction in fractions]
for fraction in fractions:
assert fraction >= 0
normalized_distributions[first_key] = dict(zip(second_keys, fractions))
return normalized_distributions
```
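A small worked example of the helper above: the inner fractions of the first key sum to 2 and are therefore scaled down, while the second key is left untouched (string keys are used purely for illustration):
```python
from conflowgen.domain_models.distribution_repositories import normalize_nested_distribution

raw = {
    "a": {"x": 1.0, "y": 1.0},   # sums to 2 -> rescaled to 0.5 / 0.5
    "b": {"x": 0.3, "y": 0.7},   # already sums to 1 -> unchanged
}
print(normalize_nested_distribution(raw))
# {'a': {'x': 0.5, 'y': 0.5}, 'b': {'x': 0.3, 'y': 0.7}}
```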
#### File: domain_models/distribution_seeders/__init__.py
```python
from conflowgen.domain_models.distribution_seeders import mode_of_transport_distribution_seeder, \
container_weight_distribution_seeder, container_length_distribution_seeder, truck_arrival_distribution_seeder, \
container_storage_requirement_distribution_seeder
def seed_all_distributions(**options) -> None:
"""
Seeds all databases with default values
Args:
**options: This allows selecting different default values that are passed through to the seeder functions.
"""
mode_of_transport_distribution_seeder.seed()
container_weight_distribution_seeder.seed()
container_length_distribution_seeder.seed()
if "assume_tas" in options:
truck_arrival_distribution_seeder.seed(assume_tas=options["assume_tas"])
else:
truck_arrival_distribution_seeder.seed()
container_storage_requirement_distribution_seeder.seed()
```
#### File: conflowgen/domain_models/distribution_validators.py
```python
from __future__ import annotations
import copy
import math
from typing import Dict, Type, Any, Optional
import enum
class DistributionHasNoElementsException(Exception):
pass
class DistributionElementIsMissingException(Exception):
pass
class DistributionProbabilityOutOfRange(Exception):
pass
class DistributionProbabilitiesUnequalOne(Exception):
pass
class DistributionElementIsInvalidException(Exception):
pass
ABSOLUTE_TOLERANCE = 0.05
"""
The absolute tolerance when comparing the sum of all frequencies with 1.
"""
def _format_dependent_variable(dependent_variable: enum.Enum):
return f"the dependent variable '{dependent_variable}'"
def _format_entry(value: Any):
if str(value).replace('.', '', 1).replace('-', '', 1).isdigit():
return f"{value:.5f}"
if isinstance(value, enum.Enum):
return str(value)
if isinstance(value, dict):
return "{...}"
return "..."
def _format_entries(values: Any) -> str:
return str([
_format_entry(value)
for value in values
])
def _format_distribution(distribution: Dict[enum.Enum, Any]) -> str:
entries = []
for enum_type_key, value in distribution.items():
entry = repr(str(enum_type_key)) + ": " + _format_entry(value)
entries.append(entry)
text = "{"
text += ", ".join(entries)
text += "}"
return text
def _cast_element_type(actual_element: Any, desired_element_type: Type[enum.Enum]) -> enum.Enum | None:
"""
Args:
actual_element: The key the user provided
desired_element_type: The required element type
Returns:
The element cast to the desired enum type, or ``None`` if the cast failed.
"""
if isinstance(actual_element, desired_element_type):
return actual_element # no casting required
try: # check if it is a valid value
return desired_element_type(actual_element)
except ValueError:
pass
try: # check if it is a valid name
return desired_element_type[actual_element]
except KeyError:
pass
if hasattr(desired_element_type, "cast_element_type"): # use enum custom code if available
return desired_element_type.cast_element_type(actual_element)
return None
def _check_all_required_keys_are_set_in_distribution(
distribution: Dict[enum.Enum, Any],
desired_element_type: Type[enum.Enum],
context: Optional[str] = None
) -> Dict[enum.Enum, Any]:
sanitized_distribution = copy.deepcopy(distribution)
if len(distribution) == 0:
msg = "The distribution does not have any elements to draw from."
if context is not None:
msg += f" This is error occurred while examining {context}."
raise DistributionHasNoElementsException(msg)
provided_elements_in_distribution = list(distribution.keys())
for element in provided_elements_in_distribution:
casted_element = _cast_element_type(element, desired_element_type)
if casted_element is None:
raise DistributionElementIsInvalidException(
f"Element '{element}' could not be casted to type '{desired_element_type}'"
)
if casted_element != element:
sanitized_distribution[casted_element] = sanitized_distribution.pop(element) # update key
theoretically_available_elements = [
desired_element_type(el)
for el in desired_element_type.__members__.values()
]
if not set(sanitized_distribution.keys()) == set(theoretically_available_elements):
msg = (f"The distribution {_format_distribution(distribution)} was expected to have the following elements: "
f"{_format_entries(theoretically_available_elements)} but it provided the following elements: "
f"{_format_entries(provided_elements_in_distribution)}.")
if context is not None:
msg += f" This is error occurred while examining {context}."
raise DistributionElementIsMissingException(msg)
return sanitized_distribution
def _check_value_range_of_frequencies_in_distribution(
distribution: Dict[enum.Enum, float],
context: Optional[str] = None
) -> None:
sum_of_probabilities = 0
for element, probability in distribution.items():
if not (0 <= probability <= 1):
msg = (
"The probability of an element to be drawn must range between 0 and 1 "
f"but for the element '{element}' the probability was {probability} in the distribution "
f"{_format_distribution(distribution)}."
)
if context is not None:
msg += f" This is error occurred while examining {context}."
raise DistributionProbabilityOutOfRange(msg)
sum_of_probabilities += probability
if not math.isclose(sum_of_probabilities, 1, abs_tol=ABSOLUTE_TOLERANCE):
msg = (
"The sum of all probabilities should sum to 1 "
f"but for the distribution {_format_distribution(distribution)} the sum was {sum_of_probabilities:.5f}."
)
if context is not None:
msg += f" This is error occurred while examining {context}."
raise DistributionProbabilitiesUnequalOne(msg)
def validate_distribution_with_no_dependent_variables(
distribution: Dict[enum.Enum, float],
key_type: Type[enum.Enum]
) -> Dict[enum.Enum, float]:
sanitized_distribution = _check_all_required_keys_are_set_in_distribution(distribution, key_type)
_check_value_range_of_frequencies_in_distribution(sanitized_distribution)
return sanitized_distribution
def validate_distribution_with_one_dependent_variable(
distribution: Dict[enum.Enum, Dict[enum.Enum, float]],
key_type_of_independent_variable: Type[enum.Enum],
key_type_of_dependent_variable: Type[enum.Enum]
) -> Dict[enum.Enum, Dict[enum.Enum, float]]:
sanitized_distribution = _check_all_required_keys_are_set_in_distribution(
distribution, key_type_of_independent_variable
)
for dependent_variable, distribution_of_dependent_variable in sanitized_distribution.items():
sanitized_distribution_of_dependent_variable = _check_all_required_keys_are_set_in_distribution(
distribution_of_dependent_variable,
key_type_of_dependent_variable,
context=_format_dependent_variable(dependent_variable)
)
_check_value_range_of_frequencies_in_distribution(
sanitized_distribution_of_dependent_variable,
context=_format_dependent_variable(dependent_variable)
)
sanitized_distribution[dependent_variable] = sanitized_distribution_of_dependent_variable
return sanitized_distribution
```
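A sketch of how the validators above behave with `ContainerLength` as the key type: string keys such as `'40 feet'` are sanitized via `cast_element_type`, while a frequency sum far from 1 raises `DistributionProbabilitiesUnequalOne`:
```python
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.distribution_validators import (
    validate_distribution_with_no_dependent_variables,
    DistributionProbabilitiesUnequalOne,
)

# string keys are cast to enum members; the returned distribution uses enum keys
sanitized = validate_distribution_with_no_dependent_variables(
    {
        "20 feet": 0.4,
        "40 feet": 0.55,
        "45 feet": 0.04,
        "other": 0.01,
    },
    ContainerLength,
)
print(sanitized[ContainerLength.twenty_feet])  # 0.4

try:
    validate_distribution_with_no_dependent_variables(
        {length: 0.5 for length in ContainerLength},  # sums to 2
        ContainerLength,
    )
except DistributionProbabilitiesUnequalOne as exception:
    print(exception)
```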
#### File: domain_models/field_types/mode_of_transport.py
```python
from __future__ import annotations
from peewee import TextField
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.field_types.enum_database_field import cast_to_db_value
class ModeOfTransportField(TextField):
"""
This class enables an enum-like field for Peewee.
"""
def db_value(self, value):
return cast_to_db_value(value)
def python_value(self, value):
return ModeOfTransport(value)
```
#### File: conflowgen/posthoc_analyses/inbound_and_outbound_vehicle_capacity_analysis_report.py
```python
from __future__ import annotations
from conflowgen.posthoc_analyses.inbound_and_outbound_vehicle_capacity_analysis import \
InboundAndOutboundVehicleCapacityAnalysis
from conflowgen.reporting import AbstractReportWithMatplotlib
class InboundAndOutboundVehicleCapacityAnalysisReport(AbstractReportWithMatplotlib):
"""
This analysis report takes the data structure as generated by :class:`.InboundAndOutboundVehicleCapacityAnalysis`
and creates a comprehensible representation for the user, either as text or as a graph.
"""
report_description = """
Analyze the vehicle capacity by vehicle type for the inbound and outbound journeys and check for the maximum
capacity of each vehicle type.
If e.g. for the vehicle type 'feeder' the maximum outbound capacity is used up, most likely there are more vehicles
that deliver containers destined for feeder vessels than there are feeder vessels planned during the period of data
generation (between `start_date` and `end_date`).
"""
def __init__(self):
super().__init__()
self.analysis = InboundAndOutboundVehicleCapacityAnalysis(
transportation_buffer=self.transportation_buffer
)
def get_report_as_text(self) -> str:
inbound_capacities, outbound_actual_capacities, outbound_maximum_capacities = self._get_capacities()
# create string representation
report = "\n"
report += "vehicle type "
report += "inbound capacity "
report += "outbound actual capacity "
report += "outbound max capacity"
report += "\n"
for vehicle_type in self.order_of_vehicle_types_in_report:
vehicle_type_as_text = str(vehicle_type).replace("_", " ")
report += f"{vehicle_type_as_text:<15} "
report += f"{inbound_capacities[vehicle_type]:>16.1f} "
report += f"{outbound_actual_capacities[vehicle_type]:>24.1f} "
report += f"{outbound_maximum_capacities[vehicle_type]:>21.1f}"
report += "\n"
report += "(rounding errors might exist)\n"
return report
def get_report_as_graph(self) -> object:
"""
The report as a graph is represented as a bar chart using pandas.
Returns:
The matplotlib axis of the bar chart.
"""
import pandas as pd # pylint: disable=import-outside-toplevel
import seaborn as sns # pylint: disable=import-outside-toplevel
sns.set_palette(sns.color_palette())
inbound_capacities, outbound_actual_capacities, outbound_maximum_capacities = self._get_capacities()
df = pd.DataFrame({
"inbound capacities": inbound_capacities,
"outbound actual capacities": outbound_actual_capacities,
"outbound maximum capacities": outbound_maximum_capacities
})
df.index = [str(i).replace("_", " ") for i in df.index]
ax = df.plot.barh()
ax.set_xlabel("Capacity (in TEU)")
ax.set_title("Inbound and outbound vehicle capacity analysis")
return ax
def _get_capacities(self):
assert self.transportation_buffer is not None
self.analysis.update(
transportation_buffer=self.transportation_buffer
)
# gather data
inbound_capacities = self.analysis.get_inbound_capacity_of_vehicles()
outbound_actual_capacities, outbound_maximum_capacities = self.analysis.get_outbound_capacity_of_vehicles()
return inbound_capacities, outbound_actual_capacities, outbound_maximum_capacities
```
#### File: conflowgen/posthoc_analyses/inbound_to_outbound_vehicle_capacity_utilization_analysis.py
```python
from __future__ import annotations
from typing import Dict, NamedTuple, Tuple
from conflowgen.domain_models.container import Container
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.domain_models.vehicle import LargeScheduledVehicle
from conflowgen.posthoc_analyses.abstract_posthoc_analysis import AbstractPostHocAnalysis
class CompleteVehicleIdentifier(NamedTuple):
"""
A vehicle identifier is a composition of the vehicle type, its service name, and the actual vehicle name
"""
mode_of_transport: ModeOfTransport
service_name: str
vehicle_name: str
class InboundToOutboundVehicleCapacityUtilizationAnalysis(AbstractPostHocAnalysis):
"""
This analysis can be run after the synthetic data has been generated.
The analysis returns a data structure that can be used for generating reports (e.g., in text or as a figure)
as is the case with :class:`.InboundToOutboundVehicleCapacityUtilizationAnalysisReport`.
"""
def __init__(self, transportation_buffer: float):
super().__init__(
transportation_buffer=transportation_buffer
)
@staticmethod
def get_inbound_and_outbound_capacity_of_each_vehicle(
vehicle_type="all"
) -> Dict[CompleteVehicleIdentifier, Tuple[float, float]]:
"""
Args:
vehicle_type: Either ``"all"``, a single vehicle of type :class:`.ModeOfTransport` or a whole collection of
vehicle types, e.g. passed as a :class:`list` or :class:`set`.
Only the vehicles that correspond to the provided vehicle type(s) are considered in the analysis.
Returns:
The transported containers of each vehicle on their inbound and outbound journey in TEU.
"""
capacities: Dict[CompleteVehicleIdentifier, Tuple[float, float]] = {}
base_selection = LargeScheduledVehicle.select().join(Schedule)
if vehicle_type == "all":
selected_large_scheduled_vehicles = base_selection
else:
if vehicle_type in set(ModeOfTransport):
selected_large_scheduled_vehicles = base_selection.where(
LargeScheduledVehicle.schedule.vehicle_type == vehicle_type
)
else: # assume it is some kind of collection (list, set, ...)
selected_large_scheduled_vehicles = base_selection.where(
LargeScheduledVehicle.schedule.vehicle_type << vehicle_type
)
vehicle: LargeScheduledVehicle
for vehicle in selected_large_scheduled_vehicles:
vehicle_schedule: Schedule = vehicle.schedule
mode_of_transport = vehicle_schedule.vehicle_type
service_name = vehicle_schedule.service_name
vehicle_name = vehicle.vehicle_name
used_capacity_on_inbound_journey = vehicle.moved_capacity
used_capacity_on_outbound_journey = 0
container: Container
for container in Container.select().where(
Container.picked_up_by_large_scheduled_vehicle == vehicle
):
teu_factor_of_container: float = ContainerLength.get_factor(container.length)
used_capacity_on_outbound_journey += teu_factor_of_container
vehicle_id = CompleteVehicleIdentifier(
mode_of_transport=mode_of_transport,
service_name=service_name,
vehicle_name=vehicle_name
)
capacities[vehicle_id] = (used_capacity_on_inbound_journey, used_capacity_on_outbound_journey)
return capacities
```
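A brief sketch of running the analysis above after data generation, assuming an open database that already contains generated vehicles and containers; the transportation buffer value is illustrative:
```python
from conflowgen.posthoc_analyses.inbound_to_outbound_vehicle_capacity_utilization_analysis import \
    InboundToOutboundVehicleCapacityUtilizationAnalysis

analysis = InboundToOutboundVehicleCapacityUtilizationAnalysis(transportation_buffer=0.2)
capacities = analysis.get_inbound_and_outbound_capacity_of_each_vehicle(
    vehicle_type="all"  # or a ModeOfTransport member, or a collection of them
)
for vehicle_id, (inbound_teu, outbound_teu) in capacities.items():
    print(vehicle_id.service_name, vehicle_id.vehicle_name, inbound_teu, outbound_teu)
```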
#### File: conflowgen/posthoc_analyses/inbound_to_outbound_vehicle_capacity_utilization_analysis_report.py
```python
from __future__ import annotations
from typing import Tuple, Any, Dict
import matplotlib.pyplot as plt
import matplotlib.ticker
import pandas as pd
from conflowgen.posthoc_analyses.inbound_to_outbound_vehicle_capacity_utilization_analysis import \
InboundToOutboundVehicleCapacityUtilizationAnalysis, CompleteVehicleIdentifier
from conflowgen.reporting import AbstractReportWithMatplotlib
class InboundToOutboundVehicleCapacityUtilizationAnalysisReport(AbstractReportWithMatplotlib):
"""
This analysis report takes the data structure as generated by :class:`.InboundToOutboundVehicleCapacityUtilizationAnalysis`
and creates a comprehensible representation for the user, either as text or as a graph.
"""
report_description = """
Analyze the used vehicle capacity for each vehicle for the inbound and outbound journeys.
Generally, the aim is an equilibrium - each vehicle should pick up approximately as many containers
at the container terminal as it has delivered.
Great disparities between the transported capacities on the inbound and outbound journey are noteworthy,
but depending on the input data they might be acceptable.
"""
maximum_length_for_readable_name = 50 # doc: Each vehicle has a name that might be a bit lengthy for text output
def __init__(self):
super().__init__()
self.analysis = InboundToOutboundVehicleCapacityUtilizationAnalysis(
transportation_buffer=self.transportation_buffer
)
@classmethod
def _create_readable_name(cls, vehicle_identifier: Tuple[Any]) -> str:
name = "-".join(str(part) for part in vehicle_identifier)
if len(name) > cls.maximum_length_for_readable_name:
name = name[:46] + "..."
return name
def get_report_as_text(self, **kwargs) -> str:
"""
The report as a text is represented as a table suitable for logging. It uses a human-readable formatting style.
Keyword Args:
vehicle_type: Either ``"all"``, a single vehicle of type :class:`.ModeOfTransport` or a whole collection of
vehicle types, e.g. passed as a :class:`list` or :class:`set`.
For the exact interpretation of the parameter, check
:class:`.InboundToOutboundVehicleCapacityUtilizationAnalysis`.
Returns:
The report in text format (possibly spanning over several lines).
"""
capacities = self._get_capacities_depending_on_vehicle_type(kwargs)
report = "\n"
report += "vehicle identifier "
report += "inbound capacity (in TEU) "
report += "outbound capacity (in TEU)"
report += "\n"
for vehicle_identifier, (used_inbound_capacity, used_outbound_capacity) in capacities.items():
vehicle_name = self._create_readable_name(vehicle_identifier)
report += f"{vehicle_name:<50} " # align this with cls.maximum_length_for_readable_name!
report += f"{used_inbound_capacity:>25.1f} "
report += f"{used_outbound_capacity:>26.1f}"
report += "\n"
if len(capacities) == 0:
report += "--no vehicles exist--\n"
else:
report += "(rounding errors might exist)\n"
return report
def get_report_as_graph(self, **kwargs) -> object:
"""
The report as a graph is represented as a scatter plot using pandas.
Keyword Args:
plot_type: Either "absolute" or "relative".
vehicle_type: Either ``"all"``, a single vehicle of type :class:`.ModeOfTransport` or a whole collection of
vehicle types, e.g. passed as a :class:`list` or :class:`set`.
For the exact interpretation of the parameter, check
:class:`.InboundToOutboundVehicleCapacityUtilizationAnalysis`.
Returns:
The matplotlib axis of the scatter plot.
"""
plot_type = kwargs.get("plot_type", "absolute")
df = self._convert_analysis_to_df(kwargs)
if plot_type == "absolute":
ax = self._plot_absolute_values(df)
elif plot_type == "relative":
ax = self._plot_relative_values(df)
else:
raise Exception(f"Plot type '{plot_type}' is not supported.")
plt.legend(
loc='lower left',
bbox_to_anchor=(1, 0),
fancybox=True,
)
return ax
def _plot_absolute_values(self, df: pd.DataFrame) -> matplotlib.pyplot.axis:
ax = df.plot.scatter(x="inbound capacity (fixed)", y="used outbound capacity")
slope = 1 + self.transportation_buffer
ax.axline((0, 0), slope=slope, color='black', label='Maximum outbound capacity')
ax.axline((0, 0), slope=1, color='gray', label='Equilibrium')
ax.set_title("Inbound to outbound capacity utilization analysis (absolute)")
ax.set_aspect('equal', adjustable='box')
ax.grid(color='lightgray', linestyle=':', linewidth=.5)
maximum = df[["inbound capacity (fixed)", "used outbound capacity"]].max(axis=1).max(axis=0)
axis_limitation = maximum * 1.1 # add some white space to the top and left
ax.set_xlim([0, axis_limitation])
ax.set_ylim([0, axis_limitation])
loc = matplotlib.ticker.MultipleLocator(base=25)
ax.xaxis.set_major_locator(loc)
return ax
def _plot_relative_values(self, df: pd.DataFrame) -> matplotlib.pyplot.axis:
ax = df.plot.scatter(x="inbound capacity (fixed)", y="ratio")
ax.axline((0, (1 + self.transportation_buffer)), slope=0, color='black', label='Maximum outbound capacity')
ax.axline((0, 1), slope=0, color='gray', label='Equilibrium')
ax.set_title("Inbound to outbound capacity utilization analysis (relative)")
ax.grid(color='lightgray', linestyle=':', linewidth=.5)
return ax
def _convert_analysis_to_df(self, kwargs: dict) -> pd.DataFrame:
capacities = self._get_capacities_depending_on_vehicle_type(kwargs)
rows = []
for vehicle_identifier, (inbound_capacity, used_outbound_capacity) in capacities.items():
vehicle_name = self._create_readable_name(vehicle_identifier)
rows.append({
"vehicle name": vehicle_name,
"inbound capacity (fixed)": inbound_capacity,
"used outbound capacity": used_outbound_capacity
})
df = pd.DataFrame(rows)
df["ratio"] = df["used outbound capacity"] / df["inbound capacity (fixed)"]
return df
def _get_capacities_depending_on_vehicle_type(self, kwargs) -> Dict[CompleteVehicleIdentifier, Tuple[float, float]]:
if "vehicle_type" in kwargs:
vehicle_type = kwargs["vehicle_type"]
capacities = self.analysis.get_inbound_and_outbound_capacity_of_each_vehicle(
vehicle_type=vehicle_type
)
else:
capacities = self.analysis.get_inbound_and_outbound_capacity_of_each_vehicle()
return capacities
```
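A sketch of producing the report above in both formats, assuming generated data in an open database and that the container flow generation properties (including the transportation buffer) have been set:
```python
import matplotlib.pyplot as plt

from conflowgen.posthoc_analyses.inbound_to_outbound_vehicle_capacity_utilization_analysis_report import \
    InboundToOutboundVehicleCapacityUtilizationAnalysisReport

report = InboundToOutboundVehicleCapacityUtilizationAnalysisReport()

# text table, optionally restricted to a vehicle type via the vehicle_type keyword
print(report.get_report_as_text())

# scatter plot, either "absolute" (TEU vs. TEU) or "relative" (ratio per vehicle)
report.get_report_as_graph(plot_type="relative")
plt.show()
```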
#### File: application_models/repositories/test_container_stream_generation_properties_repository.py
```python
import datetime
import unittest
from conflowgen.application.models.container_flow_generation_properties import ContainerFlowGenerationProperties
from conflowgen.application.repositories.container_flow_generation_properties_repository import \
ContainerFlowGenerationPropertiesRepository, InvalidTimeRangeException, \
DuplicatedContainerFlowGenerationPropertiesEntryException, MinimumNotStrictlySmallerThanMaximumException
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestContainerFlowGenerationProperties(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
self.sqlite_db = setup_sqlite_in_memory_db()
self.sqlite_db.create_tables([
ContainerFlowGenerationProperties
])
self.repository = ContainerFlowGenerationPropertiesRepository()
def test_get_default_values(self):
properties = self.repository.get_container_flow_generation_properties()
self.assertIsNone(properties.name)
self.assertIsNone(properties.start_date)
self.assertIsNone(properties.end_date)
self.assertLessEqual(
properties.generated_at - datetime.datetime.now(),
datetime.timedelta(minutes=1)
)
self.assertLessEqual(
properties.last_updated_at - datetime.datetime.now(),
datetime.timedelta(minutes=1)
)
def test_properties_creation_behavior_with_get(self):
all_properties_first = list(ContainerFlowGenerationProperties.select())
self.assertEqual(all_properties_first, [])
properties_1 = self.repository.get_container_flow_generation_properties()
self.assertIsInstance(properties_1, ContainerFlowGenerationProperties)
all_properties_second = list(ContainerFlowGenerationProperties.select())
self.assertEqual(len(all_properties_second), 1)
properties_2 = self.repository.get_container_flow_generation_properties()
self.assertIsInstance(properties_2, ContainerFlowGenerationProperties)
all_properties_third = list(ContainerFlowGenerationProperties.select())
self.assertEqual(len(all_properties_third), 1)
def test_set_values(self):
properties = self.repository.get_container_flow_generation_properties()
name = "Test"
properties.name = name
start_date = datetime.datetime.now()
properties.start_date = start_date
end_date = datetime.datetime.now() + datetime.timedelta(days=5)
properties.end_date = end_date
self.repository.set_container_flow_generation_properties(properties)
loaded_properties = self.repository.get_container_flow_generation_properties()
self.assertEqual(loaded_properties.name, name)
self.assertEqual(loaded_properties.start_date, start_date.date())
self.assertEqual(loaded_properties.end_date, end_date.date())
self.assertLessEqual(
properties.generated_at - datetime.datetime.now(),
datetime.timedelta(minutes=1)
)
self.assertLessEqual(
properties.last_updated_at - datetime.datetime.now(),
datetime.timedelta(minutes=1)
)
def test_broken_properties__end_date_too_early(self):
with self.assertRaises(InvalidTimeRangeException):
self.repository.set_container_flow_generation_properties(
ContainerFlowGenerationProperties.create(
start_date=datetime.datetime.now(),
end_date=datetime.datetime.now() - datetime.timedelta(days=5)
)
)
def test_broken_properties__double_entry_in_database(self):
ContainerFlowGenerationProperties.create(
start_date=datetime.datetime.now(),
end_date=datetime.datetime.now() - datetime.timedelta(days=5)
).save()
ContainerFlowGenerationProperties.create(
start_date=datetime.datetime.now(),
end_date=datetime.datetime.now() - datetime.timedelta(days=5)
).save()
with self.assertRaises(DuplicatedContainerFlowGenerationPropertiesEntryException):
self.repository.get_container_flow_generation_properties()
    def test_set_values_on_old_instance(self):
        """
        Changes made to the previously loaded instance must not take effect
        unless they are persisted again via the repository setter.
        """
properties = self.repository.get_container_flow_generation_properties()
name_old = "Test old"
properties.name = name_old
start_date_old = (datetime.datetime.now() - datetime.timedelta(days=5)).date()
properties.start_date = start_date_old
end_date_old = (datetime.datetime.now() - datetime.timedelta(days=2)).date()
properties.end_date = end_date_old
self.repository.set_container_flow_generation_properties(properties)
name_new = "Test new"
properties.name = name_new
start_date_new = (datetime.datetime.now() + datetime.timedelta(days=2)).date()
properties.start_date = start_date_new
end_date_new = (datetime.datetime.now() + datetime.timedelta(days=5)).date()
properties.end_date = end_date_new
loaded_properties = self.repository.get_container_flow_generation_properties()
self.assertEqual(loaded_properties.name, name_old)
self.assertEqual(loaded_properties.start_date, start_date_old)
self.assertEqual(loaded_properties.end_date, end_date_old)
def test_set_values_twice(self):
properties_old = self.repository.get_container_flow_generation_properties()
name_old = "Test old"
properties_old.name = name_old
start_date_old = (datetime.datetime.now() - datetime.timedelta(days=5)).date()
properties_old.start_date = start_date_old
end_date_old = (datetime.datetime.now() - datetime.timedelta(days=2)).date()
properties_old.end_date = end_date_old
self.repository.set_container_flow_generation_properties(properties_old)
properties_new = self.repository.get_container_flow_generation_properties()
name_new = "Test new"
properties_new.name = name_new
start_date_new = (datetime.datetime.now() + datetime.timedelta(days=2)).date()
properties_new.start_date = start_date_new
end_date_new = (datetime.datetime.now() + datetime.timedelta(days=5)).date()
properties_new.end_date = end_date_new
self.repository.set_container_flow_generation_properties(properties_new)
loaded_properties = self.repository.get_container_flow_generation_properties()
self.assertEqual(loaded_properties.name, name_new)
self.assertEqual(loaded_properties.start_date, start_date_new)
self.assertEqual(loaded_properties.end_date, end_date_new)
def test_set_invalid_minimum_maximum_pair_for_import_containers(self):
properties = self.repository.get_container_flow_generation_properties()
name = "Test"
properties.name = name
start_date = datetime.datetime.now()
properties.start_date = start_date
end_date = datetime.datetime.now() + datetime.timedelta(days=5)
properties.end_date = end_date
properties.minimum_dwell_time_of_import_containers_in_hours = 10
properties.maximum_dwell_time_of_import_containers_in_hours = 9
with self.assertRaises(MinimumNotStrictlySmallerThanMaximumException):
self.repository.set_container_flow_generation_properties(properties)
def test_set_invalid_minimum_maximum_pair_for_export_containers(self):
properties = self.repository.get_container_flow_generation_properties()
name = "Test"
properties.name = name
start_date = datetime.datetime.now()
properties.start_date = start_date
end_date = datetime.datetime.now() + datetime.timedelta(days=5)
properties.end_date = end_date
properties.minimum_dwell_time_of_export_containers_in_hours = 10
properties.maximum_dwell_time_of_export_containers_in_hours = 9
with self.assertRaises(MinimumNotStrictlySmallerThanMaximumException):
self.repository.set_container_flow_generation_properties(properties)
```
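Outside the test suite, the repository exercised above follows a get-modify-set cycle: the instance returned by the getter is only persisted once it is passed back to the setter. A minimal sketch using only the imports and calls that appear in these tests (treat it as an illustration, not documented ConFlowGen usage):

```python
import datetime

from conflowgen.application.models.container_flow_generation_properties import ContainerFlowGenerationProperties
from conflowgen.application.repositories.container_flow_generation_properties_repository import \
    ContainerFlowGenerationPropertiesRepository
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db

# Same in-memory database setup as in setUp() above.
sqlite_db = setup_sqlite_in_memory_db()
sqlite_db.create_tables([ContainerFlowGenerationProperties])

repository = ContainerFlowGenerationPropertiesRepository()

# Get, modify, and explicitly persist the properties entry.
properties = repository.get_container_flow_generation_properties()
properties.name = "Demo generation run"
properties.start_date = datetime.datetime.now()
properties.end_date = datetime.datetime.now() + datetime.timedelta(weeks=2)
repository.set_container_flow_generation_properties(properties)
```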
#### File: tests/flow_generator/test_assign_destination_to_container_service.py
```python
import datetime
import unittest
from conflowgen.flow_generator.assign_destination_to_container_service import \
AssignDestinationToContainerService
from conflowgen.domain_models.arrival_information import TruckArrivalInformationForDelivery, \
TruckArrivalInformationForPickup
from conflowgen.domain_models.container import Container
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_repositories.container_destination_distribution_repository import \
ContainerDestinationDistributionRepository
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.data_types.storage_requirement import StorageRequirement
from conflowgen.domain_models.large_vehicle_schedule import Schedule, Destination
from conflowgen.domain_models.vehicle import LargeScheduledVehicle, Train, Feeder, DeepSeaVessel, Truck, \
AbstractLargeScheduledVehicle
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestAssignDestinationToContainerService(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
sqlite_db = setup_sqlite_in_memory_db()
sqlite_db.create_tables([
Schedule,
LargeScheduledVehicle,
Container,
Destination,
TruckArrivalInformationForDelivery,
TruckArrivalInformationForPickup,
Truck,
Feeder,
DeepSeaVessel,
ModeOfTransportDistribution
])
self.repository = ContainerDestinationDistributionRepository()
self.service = AssignDestinationToContainerService()
@staticmethod
def _create_feeder(scheduled_arrival: datetime.datetime) -> Feeder:
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=scheduled_arrival.date(),
vehicle_arrives_at_time=scheduled_arrival.time(),
average_vehicle_capacity=300,
average_moved_capacity=300,
)
schedule.save()
feeder_lsv = LargeScheduledVehicle.create(
vehicle_name="TestFeeder1",
capacity_in_teu=schedule.average_vehicle_capacity,
moved_capacity=schedule.average_moved_capacity,
scheduled_arrival=scheduled_arrival,
schedule=schedule
)
feeder_lsv.save()
feeder = Feeder.create(
large_scheduled_vehicle=feeder_lsv
)
feeder.save()
return feeder
@staticmethod
def _create_train(scheduled_arrival: datetime.datetime) -> Train:
schedule = Schedule.create(
vehicle_type=ModeOfTransport.train,
service_name="TestTrainService",
vehicle_arrives_at=scheduled_arrival.date(),
vehicle_arrives_at_time=scheduled_arrival.time(),
average_vehicle_capacity=90,
average_moved_capacity=90,
)
schedule.save()
train_lsv = LargeScheduledVehicle.create(
vehicle_name="TestTrain1",
capacity_in_teu=96,
moved_capacity=schedule.average_moved_capacity,
scheduled_arrival=scheduled_arrival,
schedule=schedule
)
train_lsv.save()
train = Train.create(
large_scheduled_vehicle=train_lsv
)
train.save()
return train
@staticmethod
def _create_truck(arrival: datetime.datetime) -> Truck:
ati = TruckArrivalInformationForDelivery.create(
realized_container_delivery_time=arrival,
planned_container_delivery_time_at_window_start=None
)
truck = Truck.create(
delivers_container=False,
picks_up_container=True,
truck_arrival_information_for_delivery=ati
)
truck.save()
return truck
@staticmethod
def _create_container_for_truck(truck: Truck):
container = Container.create(
weight=20,
length=ContainerLength.twenty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.truck,
delivered_by_truck=truck,
picked_up_by=ModeOfTransport.feeder,
picked_up_by_initial=ModeOfTransport.feeder
)
return container
@staticmethod
def _create_container_for_large_scheduled_vehicle(vehicle: AbstractLargeScheduledVehicle):
large_scheduled_vehicle = vehicle.large_scheduled_vehicle
vehicle_type = vehicle.get_mode_of_transport()
container = Container.create(
weight=20,
length=ContainerLength.twenty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=vehicle_type,
delivered_by_large_scheduled_vehicle=large_scheduled_vehicle,
picked_up_by=ModeOfTransport.feeder,
picked_up_by_initial=ModeOfTransport.feeder
)
container.save()
return container
def test_no_exception_for_empty_database(self):
self.service.assign()
def test_load_container_from_truck_onto_feeder(self):
truck = self._create_truck(datetime.datetime(year=2021, month=8, day=5, hour=9, minute=0))
feeder = self._create_feeder(datetime.datetime(year=2021, month=8, day=7, hour=13, minute=15))
container = self._create_container_for_truck(truck)
container.picked_up_by_large_scheduled_vehicle = feeder.large_scheduled_vehicle
container.save()
schedule = feeder.large_scheduled_vehicle.schedule
destination_1 = Destination.create(
belongs_to_schedule=schedule,
sequence_id=1,
destination_name="TestDestination1",
)
destination_2 = Destination.create(
belongs_to_schedule=schedule,
sequence_id=2,
destination_name="TestDestination2",
)
distribution = {
schedule: {
destination_1: 0.4,
destination_2: 0.6
}
}
self.repository.set_distribution(distribution)
self.service.reload_distribution()
self.service.assign()
container_update: Container = Container.get_by_id(container.id)
self.assertIn(container_update.destination, (destination_1, destination_2))
def test_wrong_direction(self):
truck = self._create_truck(datetime.datetime(year=2021, month=8, day=5, hour=9, minute=0))
feeder = self._create_feeder(datetime.datetime(year=2021, month=8, day=7, hour=13, minute=15))
container = self._create_container_for_large_scheduled_vehicle(feeder)
container.picked_up_by_large_scheduled_vehicle = None
container.picked_up_by_truck = truck
container.picked_up_by = ModeOfTransport.truck
container.save()
schedule = feeder.large_scheduled_vehicle.schedule
destination_1 = Destination.create(
belongs_to_schedule=schedule,
sequence_id=1,
destination_name="TestDestination1",
)
destination_2 = Destination.create(
belongs_to_schedule=schedule,
sequence_id=2,
destination_name="TestDestination2",
)
distribution = {
schedule: {
destination_1: 0.4,
destination_2: 0.6
}
}
self.repository.set_distribution(distribution)
self.service.reload_distribution()
self.service.assign()
container_update: Container = Container.get_by_id(container.id)
self.assertIsNone(container_update.destination)
```
#### File: tests/posthoc_analyses/test_quay_side_throughput_analysis.py
```python
import datetime
import unittest
from conflowgen.domain_models.arrival_information import TruckArrivalInformationForPickup, \
TruckArrivalInformationForDelivery
from conflowgen.domain_models.container import Container
from conflowgen.domain_models.data_types.container_length import ContainerLength
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.data_types.storage_requirement import StorageRequirement
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_seeders import mode_of_transport_distribution_seeder
from conflowgen.domain_models.large_vehicle_schedule import Schedule, Destination
from conflowgen.domain_models.vehicle import LargeScheduledVehicle, Truck, Feeder
from conflowgen.posthoc_analyses.quay_side_throughput_analysis import QuaySideThroughputAnalysis
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestQuaySideThroughputAnalysis(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
self.sqlite_db = setup_sqlite_in_memory_db()
self.sqlite_db.create_tables([
Schedule,
Container,
LargeScheduledVehicle,
Truck,
TruckArrivalInformationForDelivery,
TruckArrivalInformationForPickup,
Feeder,
ModeOfTransportDistribution,
Destination
])
mode_of_transport_distribution_seeder.seed()
self.analysis = QuaySideThroughputAnalysis(
transportation_buffer=0.2
)
    def test_with_no_data(self):
        """Without any containers in the database, there is no activity at the quay side"""
no_action_at_quay_side = self.analysis.get_throughput_over_time()
self.assertEqual(no_action_at_quay_side, {})
def test_with_single_container(self):
now = datetime.datetime.now()
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=now.date(),
vehicle_arrives_at_time=now.time(),
average_vehicle_capacity=300,
average_moved_capacity=300,
)
feeder_lsv = LargeScheduledVehicle.create(
vehicle_name="TestFeeder1",
capacity_in_teu=300,
moved_capacity=schedule.average_moved_capacity,
scheduled_arrival=now,
schedule=schedule
)
Feeder.create(
large_scheduled_vehicle=feeder_lsv
)
aip = TruckArrivalInformationForPickup.create(
realized_container_pickup_time=datetime.datetime.now() + datetime.timedelta(hours=25)
)
truck = Truck.create(
delivers_container=False,
picks_up_container=True,
truck_arrival_information_for_delivery=None,
truck_arrival_information_for_pickup=aip
)
Container.create(
weight=20,
length=ContainerLength.twenty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.feeder,
delivered_by_large_scheduled_vehicle=feeder_lsv,
picked_up_by=ModeOfTransport.truck,
picked_up_by_initial=ModeOfTransport.truck,
picked_up_by_truck=truck
)
used_quay_side_capacity_over_time = self.analysis.get_throughput_over_time()
self.assertEqual(len(used_quay_side_capacity_over_time), 3)
self.assertSetEqual(set(used_quay_side_capacity_over_time.values()), {0, 1})
def test_with_two_containers(self):
now = datetime.datetime.now()
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=now.date(),
vehicle_arrives_at_time=now.time(),
average_vehicle_capacity=300,
average_moved_capacity=300,
)
feeder_lsv = LargeScheduledVehicle.create(
vehicle_name="TestFeeder1",
capacity_in_teu=300,
moved_capacity=schedule.average_moved_capacity,
scheduled_arrival=now,
schedule=schedule
)
Feeder.create(
large_scheduled_vehicle=feeder_lsv
)
aip = TruckArrivalInformationForPickup.create(
realized_container_pickup_time=datetime.datetime.now() + datetime.timedelta(hours=25)
)
truck = Truck.create(
delivers_container=False,
picks_up_container=True,
truck_arrival_information_for_delivery=None,
truck_arrival_information_for_pickup=aip
)
Container.create(
weight=20,
length=ContainerLength.twenty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.feeder,
delivered_by_large_scheduled_vehicle=feeder_lsv,
picked_up_by=ModeOfTransport.truck,
picked_up_by_initial=ModeOfTransport.truck,
picked_up_by_truck=truck
)
aip_2 = TruckArrivalInformationForPickup.create(
realized_container_pickup_time=datetime.datetime.now() + datetime.timedelta(hours=12)
)
truck_2 = Truck.create(
delivers_container=False,
picks_up_container=True,
truck_arrival_information_for_delivery=None,
truck_arrival_information_for_pickup=aip_2
)
Container.create(
weight=20,
length=ContainerLength.forty_feet,
storage_requirement=StorageRequirement.standard,
delivered_by=ModeOfTransport.feeder,
delivered_by_large_scheduled_vehicle=feeder_lsv,
picked_up_by=ModeOfTransport.truck,
picked_up_by_initial=ModeOfTransport.truck,
picked_up_by_truck=truck_2
)
used_quay_side_capacity_over_time = self.analysis.get_throughput_over_time()
self.assertEqual(len(used_quay_side_capacity_over_time), 3)
self.assertSetEqual(set(used_quay_side_capacity_over_time.values()), {0, 2})
```
#### File: tests/previews/test_container_flow_by_vehicle_type_preview.py
```python
import datetime
import unittest
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.distribution_repositories.mode_of_transport_distribution_repository import \
ModeOfTransportDistributionRepository
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.previews.container_flow_by_vehicle_type_preview import \
ContainerFlowByVehicleTypePreview
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestContainerFlowByVehicleTypePreview(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
self.sqlite_db = setup_sqlite_in_memory_db()
self.sqlite_db.create_tables([
Schedule,
ModeOfTransportDistribution
])
now = datetime.datetime.now()
ModeOfTransportDistributionRepository().set_mode_of_transport_distributions({
ModeOfTransport.truck: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.train: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.barge: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.feeder: {
ModeOfTransport.truck: 0.2,
ModeOfTransport.train: 0.4,
ModeOfTransport.barge: 0.1,
ModeOfTransport.feeder: 0.15,
ModeOfTransport.deep_sea_vessel: 0.15
},
ModeOfTransport.deep_sea_vessel: {
ModeOfTransport.truck: 0.2,
ModeOfTransport.train: 0.4,
ModeOfTransport.barge: 0.1,
ModeOfTransport.feeder: 0.15,
ModeOfTransport.deep_sea_vessel: 0.15
}
})
self.preview = ContainerFlowByVehicleTypePreview(
start_date=now.date(),
end_date=(now + datetime.timedelta(weeks=2)).date(),
transportation_buffer=0.2
)
    def test_with_no_schedules(self):
        """If no schedules are provided, no containers flow between any vehicle types"""
empty_flow = self.preview.get_inbound_to_outbound_flow()
self.assertSetEqual(set(ModeOfTransport), set(empty_flow.keys()))
for mode_of_transport_from in ModeOfTransport:
flow_from_vehicle_type = empty_flow[mode_of_transport_from]
self.assertSetEqual(set(ModeOfTransport), set(flow_from_vehicle_type.keys()))
for mode_of_transport_to in ModeOfTransport:
capacity_in_teu = flow_from_vehicle_type[mode_of_transport_to]
self.assertEqual(capacity_in_teu, 0, f"Expect no flow from {mode_of_transport_from} to "
f"{mode_of_transport_to} but it was {capacity_in_teu}")
def test_with_single_arrival_schedules(self):
one_week_later = datetime.datetime.now() + datetime.timedelta(weeks=1)
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=one_week_later.date(),
vehicle_arrives_at_time=one_week_later.time(),
average_vehicle_capacity=300,
average_moved_capacity=300,
vehicle_arrives_every_k_days=-1
)
schedule.save()
flow_with_one_entry = self.preview.get_inbound_to_outbound_flow()
self.assertSetEqual(set(ModeOfTransport), set(flow_with_one_entry.keys()))
uninvolved_vehicles = set(ModeOfTransport) - {ModeOfTransport.feeder, ModeOfTransport.truck}
for mode_of_transport_from in uninvolved_vehicles:
flow_from_vehicle_type = flow_with_one_entry[mode_of_transport_from]
self.assertSetEqual(set(ModeOfTransport), set(flow_from_vehicle_type.keys()))
for mode_of_transport_to in ModeOfTransport:
capacity_in_teu = flow_from_vehicle_type[mode_of_transport_to]
self.assertEqual(capacity_in_teu, 0, f"Expect no flow from {mode_of_transport_from} to "
f"{mode_of_transport_to} but it was {capacity_in_teu}")
flow_to_feeder = flow_with_one_entry[ModeOfTransport.feeder]
for mode_of_transport_to in (set(ModeOfTransport) - {ModeOfTransport.barge}):
transported_capacity = flow_to_feeder[mode_of_transport_to]
self.assertGreater(transported_capacity, 0)
flow_from_truck_to_feeder = flow_with_one_entry[ModeOfTransport.truck][ModeOfTransport.feeder]
self.assertGreater(flow_from_truck_to_feeder, 0, "Some containers must be delivered by truck for the feeder")
```
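The preview above (and the capacity preview in the next file) seeds a mode-of-transport distribution in which each inner dictionary splits the containers delivered by one vehicle type across the outbound vehicle types, so every row is expected to sum to one. A small sanity check along those lines (illustrative, the helper name is an assumption):

```python
# Illustrative check: every row of a mode-of-transport distribution, i.e. the
# outbound split for one delivering vehicle type, should sum to 1.
def assert_rows_sum_to_one(distribution: dict, tolerance: float = 1e-9) -> None:
    for delivering_vehicle, outbound_split in distribution.items():
        total = sum(outbound_split.values())
        assert abs(total - 1) <= tolerance, (
            f"Distribution for {delivering_vehicle} sums to {total}, expected 1"
        )
```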
#### File: tests/previews/test_inbound_and_outbound_vehicle_capacity_preview.py
```python
import datetime
import unittest
from conflowgen.domain_models.distribution_repositories.mode_of_transport_distribution_repository import \
ModeOfTransportDistributionRepository
from conflowgen.previews.inbound_and_outbound_vehicle_capacity_preview import \
InboundAndOutboundVehicleCapacityPreview
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.domain_models.distribution_models.mode_of_transport_distribution import ModeOfTransportDistribution
from conflowgen.domain_models.large_vehicle_schedule import Schedule
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestInboundAndOutboundVehicleCapacityPreview(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
self.sqlite_db = setup_sqlite_in_memory_db()
self.sqlite_db.create_tables([
Schedule,
ModeOfTransportDistribution
])
now = datetime.datetime.now()
ModeOfTransportDistributionRepository().set_mode_of_transport_distributions({
ModeOfTransport.truck: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.train: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.barge: {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0,
ModeOfTransport.feeder: 0.5,
ModeOfTransport.deep_sea_vessel: 0.5
},
ModeOfTransport.feeder: {
ModeOfTransport.truck: 0.2,
ModeOfTransport.train: 0.4,
ModeOfTransport.barge: 0.1,
ModeOfTransport.feeder: 0.15,
ModeOfTransport.deep_sea_vessel: 0.15
},
ModeOfTransport.deep_sea_vessel: {
ModeOfTransport.truck: 0.2,
ModeOfTransport.train: 0.4,
ModeOfTransport.barge: 0.1,
ModeOfTransport.feeder: 0.15,
ModeOfTransport.deep_sea_vessel: 0.15
}
})
self.preview = InboundAndOutboundVehicleCapacityPreview(
start_date=now.date(),
end_date=(now + datetime.timedelta(weeks=2)).date(),
transportation_buffer=0.2
)
def test_inbound_with_no_schedules(self):
"""If no schedules are provided, no capacity is needed"""
empty_capacity = self.preview.get_inbound_capacity_of_vehicles()
self.assertSetEqual(set(ModeOfTransport), set(empty_capacity.keys()))
for mode_of_transport in ModeOfTransport:
capacity_in_teu = empty_capacity[mode_of_transport]
self.assertEqual(capacity_in_teu, 0)
def test_inbound_with_single_arrival_schedules(self):
one_week_later = datetime.datetime.now() + datetime.timedelta(weeks=1)
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=one_week_later.date(),
vehicle_arrives_at_time=one_week_later.time(),
average_vehicle_capacity=300,
average_moved_capacity=300,
vehicle_arrives_every_k_days=-1
)
schedule.save()
capacity_with_one_feeder = self.preview.get_inbound_capacity_of_vehicles()
self.assertSetEqual(set(ModeOfTransport), set(capacity_with_one_feeder.keys()))
uninvolved_vehicles = (
set(ModeOfTransport.get_scheduled_vehicles())
- {ModeOfTransport.feeder, ModeOfTransport.truck}
)
for mode_of_transport in uninvolved_vehicles:
capacity_in_teu = capacity_with_one_feeder[mode_of_transport]
self.assertEqual(capacity_in_teu, 0)
inbound_capacity_of_feeder_in_teu = capacity_with_one_feeder[ModeOfTransport.feeder]
self.assertEqual(inbound_capacity_of_feeder_in_teu, 300)
# based on the seeded ModeOfTransportDistribution, the following value might vary
inbound_capacity_of_trucks_in_teu = capacity_with_one_feeder[ModeOfTransport.truck]
self.assertGreater(inbound_capacity_of_trucks_in_teu, 0)
self.assertLess(inbound_capacity_of_trucks_in_teu, 300)
def test_inbound_with_several_arrivals_schedules(self):
two_days_later = datetime.datetime.now() + datetime.timedelta(days=2)
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=two_days_later.date(),
vehicle_arrives_at_time=two_days_later.time(),
average_vehicle_capacity=300,
average_moved_capacity=300
)
schedule.save()
capacity_with_one_feeder = self.preview.get_inbound_capacity_of_vehicles()
self.assertSetEqual(set(ModeOfTransport), set(capacity_with_one_feeder.keys()))
uninvolved_vehicles = (
set(ModeOfTransport.get_scheduled_vehicles())
- {ModeOfTransport.feeder, ModeOfTransport.truck}
)
for mode_of_transport in uninvolved_vehicles:
capacity_in_teu = capacity_with_one_feeder[mode_of_transport]
self.assertEqual(capacity_in_teu, 0)
inbound_capacity_of_feeder_in_teu = capacity_with_one_feeder[ModeOfTransport.feeder]
self.assertEqual(inbound_capacity_of_feeder_in_teu, 600)
# based on the seeded ModeOfTransportDistribution, this value might vary if not properly set
inbound_capacity_of_trucks_in_teu = capacity_with_one_feeder[ModeOfTransport.truck]
self.assertAlmostEqual(inbound_capacity_of_trucks_in_teu, 120)
    def test_outbound_average_capacity_with_several_arrivals_schedules(self):
        """Tests the first return value of `get_outbound_capacity_of_vehicles()`, i.e. the average outbound capacity."""
two_days_later = datetime.datetime.now() + datetime.timedelta(days=2)
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=two_days_later.date(),
vehicle_arrives_at_time=two_days_later.time(),
average_vehicle_capacity=300,
average_moved_capacity=300
)
schedule.save()
capacity_with_one_feeder, _ = self.preview.get_outbound_capacity_of_vehicles()
self.assertSetEqual(set(ModeOfTransport), set(capacity_with_one_feeder.keys()))
uninvolved_vehicles = (
set(ModeOfTransport.get_scheduled_vehicles())
- {ModeOfTransport.feeder, ModeOfTransport.truck}
)
for mode_of_transport in uninvolved_vehicles:
capacity_in_teu = capacity_with_one_feeder[mode_of_transport]
self.assertEqual(capacity_in_teu, 0)
feeder_vessel_capacity_in_teu = capacity_with_one_feeder[ModeOfTransport.feeder]
self.assertEqual(feeder_vessel_capacity_in_teu, 600)
# based on the seeded ModeOfTransportDistribution, this value might vary if not properly set
truck_capacity_in_teu = capacity_with_one_feeder[ModeOfTransport.truck]
self.assertGreater(truck_capacity_in_teu, 0)
self.assertLess(truck_capacity_in_teu, feeder_vessel_capacity_in_teu)
self.assertAlmostEqual(truck_capacity_in_teu, 120, msg="20% of feeder traffic goes to trucks (600*0.2)")
    def test_outbound_maximum_capacity_with_several_arrivals_schedules(self):
        """Tests the second return value of `get_outbound_capacity_of_vehicles()`, i.e. the maximum outbound capacity."""
two_days_later = datetime.datetime.now() + datetime.timedelta(days=2)
schedule = Schedule.create(
vehicle_type=ModeOfTransport.feeder,
service_name="TestFeederService",
vehicle_arrives_at=two_days_later.date(),
vehicle_arrives_at_time=two_days_later.time(),
average_vehicle_capacity=400,
average_moved_capacity=300
)
schedule.save()
_, capacity_with_one_feeder = self.preview.get_outbound_capacity_of_vehicles()
self.assertSetEqual(set(ModeOfTransport), set(capacity_with_one_feeder.keys()))
uninvolved_vehicles = (
set(ModeOfTransport.get_scheduled_vehicles())
- {ModeOfTransport.feeder, ModeOfTransport.truck}
)
for mode_of_transport in uninvolved_vehicles:
capacity_in_teu = capacity_with_one_feeder[mode_of_transport]
self.assertEqual(capacity_in_teu, 0)
feeder_vessel_capacity_in_teu = capacity_with_one_feeder[ModeOfTransport.feeder]
self.assertEqual(feeder_vessel_capacity_in_teu, 720)
# based on the seeded ModeOfTransportDistribution, this value might vary if not properly set
truck_capacity_in_teu = capacity_with_one_feeder[ModeOfTransport.truck]
self.assertEqual(truck_capacity_in_teu, -1, "There is no maximum capacity for trucks, they are generated "
"as they are needed.")
```
{
"source": "1-gut/musicsamples",
"score": 2
} |
#### File: app/tests/test_urls.py
```python
from django.test import SimpleTestCase
from django.urls import resolve, reverse
from app import views
class TestUrls(SimpleTestCase):
def test_index_url_resolves(self):
url = reverse("home")
self.assertEqual(resolve(url).func, views.index)
def test_analytics_url_resolves(self):
url = reverse("analytics")
self.assertEqual(resolve(url).func, views.analytics)
def test_reference_url_resolves(self):
url = reverse("reference")
self.assertEqual(resolve(url).func, views.reference)
def test_archive_url_resolves(self):
url = reverse("sample_archive")
self.assertEqual(resolve(url).func, views.sample_archive)
def test_add_sample_url_resolves(self):
url = reverse("sample_add")
self.assertEqual(resolve(url).func, views.sample_add)
def test_view_single_sample_url_resolves(self):
url = reverse("sample_detail", kwargs={"pk": 1})
self.assertEqual(resolve(url).func, views.sample_detail)
def test_edit_sample_url_resolves(self):
url = reverse("sample_edit", kwargs={"pk": 23})
self.assertEqual(resolve(url).func, views.sample_edit)
def test_delete_sample_url_resolves(self):
url = reverse("sample_delete", kwargs={"pk": 3})
self.assertEqual(resolve(url).func, views.sample_delete)
def test_restore_sample_url_resolves(self):
url = reverse("sample_restore", kwargs={"pk": 3})
self.assertEqual(resolve(url).func, views.sample_restore)
def test_checkout_sample_url_resolves(self):
url = reverse("sample_checkout", kwargs={"pk": 3})
self.assertEqual(resolve(url).func, views.sample_checkout)
def test_search_url_resolves(self):
url = reverse("sample_search")
self.assertEqual(resolve(url).func, views.sample_search)
def test_export_excel_url_resolves(self):
url = reverse("export_excel")
self.assertEqual(resolve(url).func, views.export_excel)
def test_account_page_url_resolves(self):
url = reverse("account")
self.assertEqual(resolve(url).func, views.account)
def test_autocomplete_locations_url_resolves(self):
url = reverse("autocomplete_locations")
self.assertEqual(resolve(url).func, views.autocomplete_locations)
def test_autocomplete_patient_id_url_resolves(self):
url = reverse("autocomplete_patients")
self.assertEqual(resolve(url).func, views.autocomplete_patient_id)
def test_barcode_url_resolves(self):
url = reverse("barcode")
self.assertEqual(resolve(url).func, views.barcode)
def test_barcode_samples_used_url_resolves(self):
url = reverse("barcode_samples_used")
self.assertEqual(resolve(url).func, views.barcode_samples_used)
def test_data_export_url_resolves(self):
url = reverse("data_export")
self.assertEqual(resolve(url).func, views.data_export)
def test_export_study_samples_url(self):
url = reverse("export_study_samples", kwargs={"study_name": "gidamps"})
self.assertEqual(resolve(url).func, views.export_study_samples)
def test_filter_url(self):
url = reverse("filter")
self.assertEqual(resolve(url).func, views.filter)
def test_filter_export_csv_url(self):
url = reverse("filter_export_csv")
self.assertEqual(resolve(url).func, views.filter_export_csv)
```
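The URL tests above only fix the route names, keyword arguments, and view functions; the path strings themselves are not part of this repository excerpt. A hypothetical `app/urls.py` fragment that would satisfy a few of these tests (the URL patterns are invented, only the names and views come from the tests):

```python
from django.urls import path

from app import views

# Hypothetical patterns: route names and views match the tests above,
# the path strings are assumptions.
urlpatterns = [
    path("", views.index, name="home"),
    path("analytics/", views.analytics, name="analytics"),
    path("samples/<int:pk>/", views.sample_detail, name="sample_detail"),
    path("samples/<int:pk>/edit/", views.sample_edit, name="sample_edit"),
    path("export/<str:study_name>/", views.export_study_samples, name="export_study_samples"),
]
```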
#### File: musicsamples/app/utils.py
```python
import csv
import datetime
from django.http import HttpResponse
def export_csv(queryset, study_name="gtrac"):
    """
    Take a queryset and return an HttpResponse that downloads its rows as a CSV file.
    The optional study_name parameter controls the file name of the generated CSV.
    """
current_date = datetime.datetime.now().strftime("%d-%b-%Y")
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="%s_samples_%s.csv"' % (
study_name,
current_date,
)
writer = csv.writer(response)
field_names = [field.name for field in queryset.model._meta.get_fields()]
writer.writerow(field_names)
for row in queryset:
values = []
for field in field_names:
value = getattr(row, field)
if value is None:
value = ""
values.append(value)
writer.writerow(values)
return response
```
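A short usage sketch for `export_csv`: calling it from a Django view and returning the response directly, so the browser downloads the file. The `Sample` model, its import path, and the view name are placeholders rather than code from this repository:

```python
from app.models import Sample  # placeholder model import
from app.utils import export_csv


def download_all_samples(request):
    # export_csv sets Content-Disposition to "<study_name>_samples_<date>.csv".
    return export_csv(Sample.objects.all(), study_name="gtrac")
```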
{
"source": "1handclapping/CumulusCI",
"score": 2
} |
#### File: core/tests/test_config.py
```python
from __future__ import absolute_import
import os
import unittest
import mock
from cumulusci.core.config import BaseConfig
from cumulusci.core.config import BaseGlobalConfig
from cumulusci.core.config import BaseProjectConfig
from cumulusci.core.config import BaseTaskFlowConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.exceptions import ConfigError
from cumulusci.core.exceptions import DependencyResolutionError
from cumulusci.core.exceptions import KeychainNotFound
from cumulusci.core.exceptions import FlowNotFoundError
from cumulusci.core.exceptions import TaskNotFoundError
from cumulusci.utils import temporary_dir
class TestBaseConfig(unittest.TestCase):
def test_getattr_toplevel_key(self):
config = BaseConfig()
config.config = {"foo": "bar"}
self.assertEqual(config.foo, "bar")
def test_getattr_toplevel_key_missing(self):
config = BaseConfig()
config.config = {}
self.assertEqual(config.foo, None)
def test_getattr_child_key(self):
config = BaseConfig()
config.config = {"foo": {"bar": "baz"}}
self.assertEqual(config.foo__bar, "baz")
def test_getattr_child_parent_key_missing(self):
config = BaseConfig()
config.config = {}
self.assertEqual(config.foo__bar, None)
def test_getattr_child_key_missing(self):
config = BaseConfig()
config.config = {"foo": {}}
self.assertEqual(config.foo__bar, None)
def test_getattr_default_toplevel(self):
config = BaseConfig()
config.config = {"foo": "bar"}
config.defaults = {"foo": "default"}
self.assertEqual(config.foo, "bar")
def test_getattr_default_toplevel_missing_default(self):
config = BaseConfig()
config.config = {"foo": "bar"}
config.defaults = {}
self.assertEqual(config.foo, "bar")
def test_getattr_default_toplevel_missing_config(self):
config = BaseConfig()
config.config = {}
config.defaults = {"foo": "default"}
self.assertEqual(config.foo, "default")
def test_getattr_default_child(self):
config = BaseConfig()
config.config = {"foo": {"bar": "baz"}}
config.defaults = {"foo__bar": "default"}
self.assertEqual(config.foo__bar, "baz")
def test_getattr_default_child_missing_default(self):
config = BaseConfig()
config.config = {"foo": {"bar": "baz"}}
config.defaults = {}
self.assertEqual(config.foo__bar, "baz")
def test_getattr_default_child_missing_config(self):
config = BaseConfig()
config.config = {}
config.defaults = {"foo__bar": "default"}
self.assertEqual(config.foo__bar, "default")
class DummyContents(object):
def __init__(self, content):
self.decoded = content
class DummyRepository(object):
default_branch = "master"
_api = "http://"
def __init__(self, owner, name, contents, releases=None):
self.owner = owner
self.name = name
self.html_url = "https://github.com/{}/{}".format(owner, name)
self._contents = contents
self._releases = releases
def file_contents(self, path, **kw):
try:
return self._contents[path]
except KeyError:
raise AssertionError("Accessed unexpected file: {}".format(path))
def directory_contents(self, path, **kw):
try:
return self._contents[path]
except KeyError:
raise AssertionError("Accessed unexpected file: {}".format(path))
def _build_url(self, *args, **kw):
return self._api
def _get(self, url):
res = mock.Mock()
res.json.return_value = {"name": "2"}
return res
def releases(self):
return iter(self._releases)
class DummyRelease(object):
def __init__(self, tag_name, name=None):
self.tag_name = tag_name
self.name = name
CUMULUSCI_TEST_REPO = DummyRepository(
"SFDO-Tooling",
"CumulusCI-Test",
{
"cumulusci.yml": DummyContents(
b"""
project:
name: CumulusCI-Test
package:
name: Cumulus-Test
namespace: ccitest
git:
repo_url: https://github.com/SFDO-Tooling/CumulusCI-Test
dependencies:
- github: https://github.com/SFDO-Tooling/CumulusCI-Test-Dep
"""
),
"unpackaged/pre": {"pre": {}, "skip": {}},
"src": {"src": ""},
"unpackaged/post": {"post": {}, "skip": {}},
},
)
CUMULUSCI_TEST_DEP_REPO = DummyRepository(
"SFDO-Tooling",
"CumulusCI-Test-Dep",
{
"cumulusci.yml": DummyContents(
b"""
project:
name: CumulusCI-Test-Dep
package:
name: Cumulus-Test-Dep
namespace: ccitestdep
git:
repo_url: https://github.com/SFDO-Tooling/CumulusCI-Test-Dep
"""
),
"unpackaged/pre": {},
"src": {},
"unpackaged/post": {},
},
)
CUMULUSCI_REPO = DummyRepository(
"SFDO-Tooling",
"CumulusCI",
{},
[
DummyRelease("beta/1.0-Beta_1"),
DummyRelease("beta/bogus"),
DummyRelease("release/1.0"),
],
)
class DummyGithub(object):
def repository(self, owner, name):
if name == "CumulusCI":
return CUMULUSCI_REPO
elif name == "CumulusCI-Test":
return CUMULUSCI_TEST_REPO
elif name == "CumulusCI-Test-Dep":
return CUMULUSCI_TEST_DEP_REPO
else:
raise AssertionError("Unexpected repository: {}".format(name))
class DummyService(object):
password = "password"
def __init__(self, name):
self.name = name
class DummyKeychain(object):
def get_service(self, name):
return DummyService(name)
class TestBaseProjectConfig(unittest.TestCase):
def test_config_global_local(self):
global_config = BaseGlobalConfig()
global_config.config_global_local = {}
config = BaseProjectConfig(global_config)
self.assertIs(global_config.config_global_local, config.config_global_local)
def test_config_global(self):
global_config = BaseGlobalConfig()
global_config.config_global = {}
config = BaseProjectConfig(global_config)
self.assertIs(global_config.config_global, config.config_global)
def test_repo_info(self):
env = {
"CUMULUSCI_AUTO_DETECT": "1",
"HEROKU_TEST_RUN_ID": "TEST1",
"HEROKU_TEST_RUN_BRANCH": "master",
"HEROKU_TEST_RUN_COMMIT_VERSION": "HEAD",
"CUMULUSCI_REPO_BRANCH": "feature/test",
"CUMULUSCI_REPO_COMMIT": "HEAD~1",
"CUMULUSCI_REPO_ROOT": ".",
"CUMULUSCI_REPO_URL": "https://github.com/SFDO-Tooling/CumulusCI-Test.git",
}
with mock.patch.dict(os.environ, env):
config = BaseProjectConfig(BaseGlobalConfig())
result = config.repo_info
self.assertEqual(
{
"ci": "heroku",
"name": "CumulusCI-Test",
"owner": "SFDO-Tooling",
"branch": "feature/test",
"commit": "HEAD~1",
"root": ".",
"url": "https://github.com/SFDO-Tooling/CumulusCI-Test.git",
},
result,
)
def test_repo_info_missing_env(self):
env = {
"CUMULUSCI_AUTO_DETECT": "1",
"HEROKU_TEST_RUN_ID": "TEST1",
"HEROKU_TEST_RUN_BRANCH": "master",
"HEROKU_TEST_RUN_COMMIT_VERSION": "HEAD",
"CUMULUSCI_REPO_BRANCH": "feature/test",
"CUMULUSCI_REPO_COMMIT": "HEAD~1",
"CUMULUSCI_REPO_ROOT": ".",
}
with mock.patch.dict(os.environ, env):
with self.assertRaises(ConfigError):
config = BaseProjectConfig(BaseGlobalConfig())
config.repo_info
def test_repo_root_from_env(self):
config = BaseProjectConfig(BaseGlobalConfig())
config._repo_info = {"root": "."}
self.assertEqual(".", config.repo_root)
def test_repo_name_from_repo_info(self):
config = BaseProjectConfig(BaseGlobalConfig())
config._repo_info = {"name": "CumulusCI"}
self.assertEqual("CumulusCI", config.repo_name)
def test_repo_name_no_repo_root(self):
config = BaseProjectConfig(BaseGlobalConfig())
with temporary_dir() as d:
self.assertIsNone(config.repo_name)
def test_repo_name_from_git(self):
config = BaseProjectConfig(BaseGlobalConfig())
self.assertEqual("CumulusCI", config.repo_name)
def test_repo_url_from_repo_info(self):
config = BaseProjectConfig(BaseGlobalConfig())
config._repo_info = {"url": "https://github.com/SFDO-Tooling/CumulusCI"}
self.assertEqual("https://github.com/SFDO-Tooling/CumulusCI", config.repo_url)
def test_repo_url_no_repo_root(self):
config = BaseProjectConfig(BaseGlobalConfig())
with temporary_dir() as d:
self.assertIsNone(config.repo_url)
def test_repo_url_from_git(self):
config = BaseProjectConfig(BaseGlobalConfig())
self.assertIn("/CumulusCI", config.repo_url)
def test_repo_owner_from_repo_info(self):
config = BaseProjectConfig(BaseGlobalConfig())
config._repo_info = {"owner": "SFDO-Tooling"}
self.assertEqual("SFDO-Tooling", config.repo_owner)
def test_repo_owner_no_repo_root(self):
config = BaseProjectConfig(BaseGlobalConfig())
with temporary_dir() as d:
self.assertIsNone(config.repo_owner)
def test_repo_branch_from_repo_info(self):
config = BaseProjectConfig(BaseGlobalConfig())
config._repo_info = {"branch": "master"}
self.assertEqual("master", config.repo_branch)
def test_repo_branch_no_repo_root(self):
config = BaseProjectConfig(BaseGlobalConfig())
with temporary_dir() as d:
self.assertIsNone(config.repo_branch)
def test_repo_commit_from_repo_info(self):
config = BaseProjectConfig(BaseGlobalConfig())
config._repo_info = {"commit": "abcdef"}
self.assertEqual("abcdef", config.repo_commit)
def test_repo_commit_no_repo_root(self):
config = BaseProjectConfig(BaseGlobalConfig())
with temporary_dir() as d:
self.assertIsNone(config.repo_commit)
def test_repo_commit_no_repo_branch(self):
config = BaseProjectConfig(BaseGlobalConfig())
with temporary_dir() as d:
os.mkdir(os.path.join(d, ".git"))
with open(os.path.join(d, ".git", "HEAD"), "w") as f:
f.write("abcdef")
self.assertIsNone(config.repo_commit)
def test_repo_commit_packed_refs(self):
config = BaseProjectConfig(BaseGlobalConfig())
with temporary_dir() as d:
os.system("git init")
with open(os.path.join(d, ".git", "packed-refs"), "w") as f:
f.write("# pack-refs with: peeled fully-peeled sorted\n")
f.write("#\n")
f.write(
"8ce67f4519190cd1ec9785105168e21b9599bc27 refs/remotes/origin/master\n"
)
self.assertIsNotNone(config.repo_commit)
def test_use_sentry(self):
config = BaseProjectConfig(BaseGlobalConfig())
config.keychain = mock.Mock()
self.assertTrue(config.use_sentry)
@mock.patch("raven.Client")
def test_init_sentry(self, raven_client):
config = BaseProjectConfig(BaseGlobalConfig())
config.keychain = mock.Mock()
config.init_sentry()
self.assertEqual(
{"repo", "commit", "cci version", "branch"},
set(raven_client.call_args[1]["tags"].keys()),
)
def test_get_latest_version(self):
config = BaseProjectConfig(
BaseGlobalConfig(),
{
"project": {
"git": {"prefix_beta": "beta/", "prefix_release": "release/"}
}
},
)
config.get_github_api = DummyGithub
result = config.get_latest_version(beta=True)
self.assertEqual("1.0 (Beta 1)", result)
def test_config_project_path_no_repo_root(self):
config = BaseProjectConfig(BaseGlobalConfig())
with temporary_dir() as d:
self.assertIsNone(config.config_project_path)
def test_get_tag_for_version(self):
config = BaseProjectConfig(
BaseGlobalConfig(), {"project": {"git": {"prefix_release": "release/"}}}
)
self.assertEqual("release/1.0", config.get_tag_for_version("1.0"))
def test_get_tag_for_version_beta(self):
config = BaseProjectConfig(
BaseGlobalConfig(), {"project": {"git": {"prefix_beta": "beta/"}}}
)
self.assertEqual("beta/1.0-Beta_1", config.get_tag_for_version("1.0 (Beta 1)"))
def test_get_version_for_tag(self):
config = BaseProjectConfig(
BaseGlobalConfig(),
{
"project": {
"git": {"prefix_beta": "beta/", "prefix_release": "release/"}
}
},
)
self.assertEqual("1.0", config.get_version_for_tag("release/1.0"))
def test_check_keychain(self):
config = BaseProjectConfig(BaseGlobalConfig())
with self.assertRaises(KeychainNotFound):
config._check_keychain()
def test_get_static_dependencies(self):
dep = {"namespace": "npsp", "version": "3"}
config = BaseProjectConfig(
BaseGlobalConfig(), {"project": {"dependencies": [dep]}}
)
self.assertEqual([dep], config.get_static_dependencies())
def test_get_static_dependencies_no_dependencies(self):
config = BaseProjectConfig(BaseGlobalConfig())
self.assertIsNone(config.get_static_dependencies())
def test_pretty_dependencies(self):
repo = mock.Mock(full_name="TestRepo")
dep = {
"namespace": "npsp",
"version": "3",
"boolean": False,
"dependencies": [{"repo": repo, "dependencies": []}],
}
config = BaseProjectConfig(BaseGlobalConfig())
result = "\n".join(config.pretty_dependencies([dep]))
self.assertEqual(
""" - dependencies:
- repo: TestRepo
namespace: npsp
version: 3""",
result,
)
def test_process_github_dependency(self):
global_config = BaseGlobalConfig()
config = BaseProjectConfig(global_config)
config.get_github_api = DummyGithub
config.keychain = DummyKeychain()
result = config.process_github_dependency(
{
"github": "https://github.com/SFDO-Tooling/CumulusCI-Test.git",
"unmanaged": True,
"skip": ["unpackaged/pre/skip", "unpackaged/post/skip"],
}
)
self.assertEqual(
result,
[
{
u"repo": CUMULUSCI_TEST_REPO,
u"ref": None,
u"subfolder": u"unpackaged/pre/pre",
u"unmanaged": True,
u"namespace_inject": None,
u"namespace_strip": None,
u"namespace_tokenize": None,
},
{u"version": "2", u"namespace": "ccitestdep"},
{
u"repo": CUMULUSCI_TEST_REPO,
u"ref": None,
u"subfolder": u"src",
u"unmanaged": True,
u"namespace_inject": None,
u"namespace_strip": None,
u"namespace_tokenize": None,
},
{
u"repo": CUMULUSCI_TEST_REPO,
u"ref": None,
u"subfolder": u"unpackaged/post/post",
u"unmanaged": True,
u"namespace_inject": "ccitest",
u"namespace_strip": None,
u"namespace_tokenize": None,
},
],
)
def test_process_github_dependency_with_tag(self):
global_config = BaseGlobalConfig()
config = BaseProjectConfig(global_config)
config.get_github_api = DummyGithub
config.keychain = DummyKeychain()
result = config.process_github_dependency(
{
"github": "https://github.com/SFDO-Tooling/CumulusCI-Test.git",
"tag": "release/1.0",
}
)
self.assertIn(
{
"namespace": "ccitest",
"version": "1.0",
"dependencies": [{"namespace": "ccitestdep", "version": "2"}],
},
result,
)
def test_process_github_dependency_latest(self):
global_config = BaseGlobalConfig()
config = BaseProjectConfig(global_config)
config.get_github_api = DummyGithub
config.keychain = DummyKeychain()
CUMULUSCI_TEST_DEP_REPO._releases = [
DummyRelease("beta/1.1-Beta_1", "1.1 (Beta 1)"),
DummyRelease("release/1.0"),
]
result = config.process_github_dependency(
{
"github": "https://github.com/SFDO-Tooling/CumulusCI-Test.git",
"unmanaged": True,
"skip": ["unpackaged/pre/skip", "unpackaged/post/skip"],
},
"",
include_beta=True,
)
self.assertEqual(
result,
[
{
u"repo": CUMULUSCI_TEST_REPO,
u"ref": None,
u"subfolder": u"unpackaged/pre/pre",
u"unmanaged": True,
u"namespace_inject": None,
u"namespace_strip": None,
u"namespace_tokenize": None,
},
{u"version": "1.1 (Beta 1)", u"namespace": "ccitestdep"},
{
u"repo": CUMULUSCI_TEST_REPO,
u"ref": None,
u"subfolder": u"src",
u"unmanaged": True,
u"namespace_inject": None,
u"namespace_strip": None,
u"namespace_tokenize": None,
},
{
u"repo": CUMULUSCI_TEST_REPO,
u"ref": None,
u"subfolder": u"unpackaged/post/post",
u"unmanaged": True,
u"namespace_inject": "ccitest",
u"namespace_strip": None,
u"namespace_tokenize": None,
},
],
)
CUMULUSCI_TEST_DEP_REPO._releases = None
def test_process_github_dependency_cannot_find_latest(self):
global_config = BaseGlobalConfig()
config = BaseProjectConfig(global_config)
config.get_github_api = DummyGithub
config.keychain = DummyKeychain()
CUMULUSCI_TEST_DEP_REPO._get = mock.Mock(side_effect=Exception)
with self.assertRaises(DependencyResolutionError):
config.process_github_dependency(
{"github": "https://github.com/SFDO-Tooling/CumulusCI-Test-Dep.git"}
)
del CUMULUSCI_TEST_DEP_REPO._get
class TestBaseTaskFlowConfig(unittest.TestCase):
def setUp(self):
self.task_flow_config = BaseTaskFlowConfig(
{
"tasks": {
"deploy": {"description": "Deploy Task"},
"manage": {},
"control": {},
},
"flows": {
"coffee": {"description": "Coffee Flow"},
"juice": {"description": "Juice Flow"},
},
}
)
def test_list_tasks(self):
tasks = self.task_flow_config.list_tasks()
self.assertEqual(len(tasks), 3)
deploy = [task for task in tasks if task["name"] == "deploy"][0]
self.assertEqual(deploy["description"], "Deploy Task")
def test_get_task(self):
task = self.task_flow_config.get_task("deploy")
self.assertIsInstance(task, BaseConfig)
self.assertIn(("description", "Deploy Task"), task.config.items())
def test_no_task(self):
with self.assertRaises(TaskNotFoundError):
self.task_flow_config.get_task("robotic_superstar")
def test_get_flow(self):
flow = self.task_flow_config.get_flow("coffee")
self.assertIsInstance(flow, BaseConfig)
self.assertIn(("description", "Coffee Flow"), flow.config.items())
def test_no_flow(self):
with self.assertRaises(FlowNotFoundError):
self.task_flow_config.get_flow("water")
def test_list_flows(self):
flows = self.task_flow_config.list_flows()
self.assertEqual(len(flows), 2)
coffee = [flow for flow in flows if flow["name"] == "coffee"][0]
self.assertEqual(coffee["description"], "Coffee Flow")
class TestOrgConfig(unittest.TestCase):
@mock.patch("cumulusci.core.config.OrgConfig.SalesforceOAuth2")
def test_refresh_oauth_token(self, SalesforceOAuth2):
config = OrgConfig({"refresh_token": mock.sentinel.refresh_token}, "test")
config._load_userinfo = mock.Mock()
keychain = mock.Mock()
SalesforceOAuth2.return_value = oauth = mock.Mock()
oauth.refresh_token.return_value = resp = mock.Mock()
resp.json.return_value = {}
config.refresh_oauth_token(keychain)
oauth.refresh_token.assert_called_once_with(mock.sentinel.refresh_token)
def test_refresh_oauth_token_no_connected_app(self):
config = OrgConfig({}, "test")
with self.assertRaises(AttributeError):
config.refresh_oauth_token(None)
def test_lightning_base_url(self):
config = OrgConfig({"instance_url": "https://na01.salesforce.com"}, "test")
self.assertEqual("https://na01.lightning.force.com", config.lightning_base_url)
def test_start_url(self):
config = OrgConfig(
{"instance_url": "https://na01.salesforce.com", "access_token": "TOKEN"},
"test",
)
self.assertEqual(
"https://na01.salesforce.com/secur/frontdoor.jsp?sid=TOKEN",
config.start_url,
)
def test_user_id(self):
config = OrgConfig({"id": "org/user"}, "test")
self.assertEqual("user", config.user_id)
def test_can_delete(self):
config = OrgConfig({}, "test")
self.assertFalse(config.can_delete())
```
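The `TestBaseConfig` cases above pin down the lookup semantics of `BaseConfig`: a double-underscore attribute such as `foo__bar` walks into nested dictionaries of `config`, falls back to `defaults`, and yields `None` instead of raising when the path is missing. A standalone sketch mirroring the behaviour the tests assert (not CumulusCI's actual implementation):

```python
# Illustration of the lookup order asserted by TestBaseConfig above;
# not the real CumulusCI implementation.
def lookup(config, defaults, name):
    node = config
    for key in name.split("__"):
        if not isinstance(node, dict) or key not in node:
            # Fall back to defaults, then None (the tests expect None, not an error).
            return defaults.get(name)
        node = node[key]
    return node


assert lookup({"foo": {"bar": "baz"}}, {"foo__bar": "default"}, "foo__bar") == "baz"
assert lookup({}, {"foo__bar": "default"}, "foo__bar") == "default"
assert lookup({"foo": {}}, {}, "foo__bar") is None
```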
#### File: core/tests/test_flows.py
```python
import unittest
import logging
import mock
from collections import Callable
from cumulusci.core.flows import BaseFlow
from cumulusci.core.tasks import BaseTask
from cumulusci.core.config import FlowConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.exceptions import FlowConfigError
from cumulusci.core.exceptions import FlowInfiniteLoopError
from cumulusci.core.exceptions import FlowNotReadyError
from cumulusci.core.exceptions import TaskNotFoundError
from cumulusci.core.tests.utils import MockLoggingHandler
from cumulusci.tests.util import create_project_config
import cumulusci.core
ORG_ID = "00D000000000001"
class _TaskReturnsStuff(BaseTask):
def _run_task(self):
self.return_values = {"name": "supername"}
class _TaskResponseName(BaseTask):
task_options = {"response": {"description": "the response to print"}}
def _run_task(self):
return self.options["response"]
class _TaskRaisesException(BaseTask):
task_options = {
"exception": {"description": "The exception to raise"},
"message": {"description": "The exception message"},
}
def _run_task(self):
raise self.options["exception"](self.options["message"])
class _SfdcTask(BaseTask):
salesforce_task = True
def _run_task(self):
return -1
@mock.patch("cumulusci.core.flows.BaseFlow._init_org")
class TestBaseFlow(unittest.TestCase):
""" Tests the expectations of a BaseFlow caller """
@classmethod
def setUpClass(cls):
super(TestBaseFlow, cls).setUpClass()
logger = logging.getLogger(cumulusci.core.__name__)
logger.setLevel(logging.DEBUG)
cls._flow_log_handler = MockLoggingHandler(logging.DEBUG)
logger.addHandler(cls._flow_log_handler)
def setUp(self):
self.project_config = create_project_config("TestOwner", "TestRepo")
self.project_config.config["tasks"] = {
"pass_name": {
"description": "Pass the name",
"class_path": "cumulusci.core.tests.test_flows._TaskReturnsStuff",
},
"name_response": {
"description": "Pass the name",
"class_path": "cumulusci.core.tests.test_flows._TaskResponseName",
},
"raise_exception": {
"description": "Raises an exception",
"class_path": "cumulusci.core.tests.test_flows._TaskRaisesException",
"options": {
"exception": Exception,
"message": "Test raised exception as expected",
},
},
"sfdc_task": {
"description": "An sfdc task",
"class_path": "cumulusci.core.tests.test_flows._SfdcTask",
},
}
self.project_config.config["flows"] = {
"nested_flow": {
"description": "A flow that runs inside another flow",
"steps": {1: {"task": "pass_name"}},
},
"nested_flow_2": {
"description": "A flow that runs inside another flow, and calls another flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow"}},
},
}
self.org_config = OrgConfig(
{"username": "<EMAIL>", "org_id": ORG_ID}, "test"
)
self._flow_log_handler.reset()
self.flow_log = self._flow_log_handler.messages
def test_init(self, mock_class):
""" BaseFlow initializes and offers a logger """
flow_config = FlowConfig({})
mock_class.return_value = None
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertEqual(hasattr(flow, "logger"), True)
def test_is_callable(self, mock_class):
""" BaseFlow exposes itself as a callable for use """
flow_config = FlowConfig({})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertIsInstance(flow, Callable)
def test_pass_around_values(self, mock_class):
""" A flow's options reach into return values from other tasks. """
mock_class.return_value = None
# instantiate a flow with two tasks
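        # The "^^pass_name.name" option value below is resolved from the
        # return_values of the earlier "pass_name" step (see _TaskReturnsStuff),
        # so the second task receives "supername".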
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
# run the flow
flow()
# the flow results for the second task should be 'name'
self.assertEqual("supername", flow.step_results[1])
def test_task_options(self, mock_class):
""" A flow can accept task options and pass them to the task. """
mock_class.return_value = None
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "name_response", "options": {"response": "foo"}}},
}
)
flow = BaseFlow(
self.project_config,
flow_config,
self.org_config,
options={"name_response__response": "bar"},
)
# run the flow
flow()
# the flow results for the first task should be 'bar'
self.assertEqual("bar", flow.step_results[0])
    def test_skip_kwarg(self, mock_class):
        """ A flow can receive a list of tasks to skip at init time """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(
self.project_config, flow_config, self.org_config, skip=["name_response"]
)
# run the flow
flow()
# the number of tasks in the flow should be 1 instead of 2
self.assertEqual(1, len(flow.step_results))
def test_skip_task_value_none(self, mock_class):
""" A flow skips any tasks whose name is None to allow override via yaml """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "None"}},
}
)
flow = BaseFlow(
self.project_config, flow_config, self.org_config, skip=["name_response"]
)
# run the flow
flow()
# the number of tasks in the flow should be 1 instead of 2
self.assertEqual(1, len(flow.step_results))
    def test_find_step_by_name_no_steps(self, mock_class):
        """ A flow with no steps raises FlowConfigError, and _find_step_by_name finds nothing """
# instantiate a flow with two tasks
flow_config = FlowConfig({"description": "Run two tasks"})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertIsNone(flow._find_step_by_name("task"))
with self.assertRaises(FlowConfigError):
flow()
    def test_find_step_by_name_not_first(self, mock_class):
        """ The _find_step_by_name method can locate a step that is not the first one """
# instantiate a flow with two tasks
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"task": "pass_name"},
2: {
"task": "name_response",
"options": {"response": "^^pass_name.name"},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
task = flow._find_step_by_name("name_response")
self.assertEqual(
"cumulusci.core.tests.test_flows._TaskResponseName",
task.task_config.class_path,
)
def test_find_step_by_name__flow(self, mock_class):
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {
1: {"flow": "nested_flow"},
2: {
"task": "name_response",
"options": {
"response": "^^nested_flow.pass_name.name",
"from_flow": "^^nested_flow.name",
},
},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
step = flow._find_step_by_name("nested_flow")
self.assertIsInstance(step, BaseFlow)
def test_render_task_config_empty_value(self, mock_class):
""" The _render_task_config method skips option values of None """
        # instantiate a flow with one task
flow_config = FlowConfig(
{
"description": "Run a tasks",
"steps": {1: {"task": "name_response", "options": {"response": None}}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
task = flow._find_step_by_name("name_response")
config = flow._render_task_config(task)
self.assertEqual(["Options:"], config)
def test_task_raises_exception_fail(self, mock_class):
""" A flow aborts when a task raises an exception """
flow_config = FlowConfig(
{"description": "Run a task", "steps": {1: {"task": "raise_exception"}}}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
self.assertRaises(Exception, flow)
def test_task_raises_exception_ignore(self, mock_class):
""" A flow continues when a task configured with ignore_failure raises an exception """
flow_config = FlowConfig(
{
"description": "Run a task",
"steps": {
1: {"task": "raise_exception", "ignore_failure": True},
2: {"task": "pass_name"},
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
def test_call_no_tasks(self, mock_class):
""" A flow with no tasks will have no responses. """
flow_config = FlowConfig({"description": "Run no tasks", "steps": {}})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual([], flow.step_return_values)
self.assertEqual([], flow.steps)
def test_call_one_task(self, mock_class):
""" A flow with one task will execute the task """
flow_config = FlowConfig(
{"description": "Run one task", "steps": {1: {"task": "pass_name"}}}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertTrue(
any("Flow Description: Run one task" in s for s in self.flow_log["info"])
)
self.assertEqual([{"name": "supername"}], flow.step_return_values)
self.assertEqual(1, len(flow.steps))
def test_call_many_tasks(self, mock_class):
""" A flow with many tasks will dispatch each task """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(
[{"name": "supername"}, {"name": "supername"}], flow.step_return_values
)
self.assertEqual(2, len(flow.steps))
def test_call_task_not_found(self, mock_class):
""" A flow with reference to a task that doesn't exist in the
project will throw a TaskNotFoundError """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "do_delightulthings"}},
}
)
with self.assertRaises(TaskNotFoundError):
flow = BaseFlow(self.project_config, flow_config, self.org_config)
def test_flow_prints_org_id(self, mock_class):
""" A flow with an org prints the org ID """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
org_id_logs = [s for s in self.flow_log["info"] if ORG_ID in s]
self.assertEqual(1, len(org_id_logs))
def test_flow_no_org_no_org_id(self, mock_class):
""" A flow without an org does not print the org ID """
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "pass_name"}, 2: {"task": "pass_name"}},
}
)
flow = BaseFlow(self.project_config, flow_config, None)
flow()
self.assertFalse(any(ORG_ID in s for s in self.flow_log["info"]))
def test_flow_prints_org_id_once_only(self, mock_class):
""" A flow with sf tasks prints the org ID only once."""
flow_config = FlowConfig(
{
"description": "Run two tasks",
"steps": {1: {"task": "sfdc_task"}, 2: {"task": "sfdc_task"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
org_id_logs = [s for s in self.flow_log["info"] if ORG_ID in s]
self.assertEqual(1, len(org_id_logs))
def test_nested_flow(self, mock_class):
""" Flows can run inside other flows """
flow_config = FlowConfig(
{
"description": "Run a task and a flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][0])
def test_nested_flow_options(self, mock_class):
flow_config = FlowConfig(
{
"description": "Run a flow with task options",
"steps": {
1: {"flow": "nested_flow", "options": {"pass_name": {"foo": "bar"}}}
},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual("bar", flow.steps[0].options["pass_name__foo"])
def test_nested_flow_2(self, mock_class):
""" Flows can run inside other flows and call other flows """
flow_config = FlowConfig(
{
"description": "Run a task and a flow",
"steps": {1: {"task": "pass_name"}, 2: {"flow": "nested_flow_2"}},
}
)
flow = BaseFlow(self.project_config, flow_config, self.org_config)
flow()
self.assertEqual(2, len(flow.steps))
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][0])
self.assertEqual(flow.step_return_values[0], flow.step_return_values[1][1][0])
def test_check_infinite_flows(self, mock_class):
self.project_config.config["flows"] = {
"nested_flow": {
"description": "A flow that runs inside another flow",
"steps": {1: {"flow": "nested_flow"}},
}
}
flow_config = FlowConfig({"steps": {1: {"flow": "nested_flow"}}})
with self.assertRaises(FlowInfiniteLoopError):
BaseFlow(self.project_config, flow_config, self.org_config)
def test_rejects_old_syntax(self, mock_class):
flow_config = FlowConfig({"tasks": {1: {"task": "pass_name"}}})
flow = BaseFlow(self.project_config, flow_config, self.org_config)
with self.assertRaises(FlowConfigError):
flow._get_steps_ordered()
def test_rejects_flow_and_task_in_same_step(self, mock_class):
flow_config = FlowConfig(
{"steps": {1: {"task": "pass_name", "flow": "nested_flow"}}}
)
with self.assertRaises(FlowConfigError):
BaseFlow(self.project_config, flow_config, self.org_config)
def test_call__not_prepped(self, mock_class):
flow_config = FlowConfig({})
flow = BaseFlow(self.project_config, flow_config, self.org_config, prep=False)
with self.assertRaises(FlowNotReadyError):
flow()
```
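The two option-passing conventions exercised in these tests are easy to miss in the fixtures, so here is a compact, hypothetical recap (not part of the test suite): `^^task_name.key` pulls a value out of an earlier step's return dict, while `task_name__option_name` namespaces an option override handed to `BaseFlow`. It assumes the same `FlowConfig` import and fixture tasks (`pass_name`, `name_response`) used above.
```python
from cumulusci.core.config import FlowConfig

# Step 2's "response" option is resolved at runtime from step 1's return
# value ({"name": "supername"}) via the ^^ syntax.
flow_config = FlowConfig(
    {
        "description": "Pass a value between steps",
        "steps": {
            1: {"task": "pass_name"},
            2: {"task": "name_response", "options": {"response": "^^pass_name.name"}},
        },
    }
)

# Option overrides passed to BaseFlow use the task_name__option_name convention,
# e.g. BaseFlow(project_config, flow_config, org_config,
#               options={"name_response__response": "bar"}).
```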
#### File: salesforce/tests/test_UpdateAdminProfile.py
```python
import mock
import os
import unittest
from cumulusci.core.config import BaseGlobalConfig
from cumulusci.core.config import BaseProjectConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.config import TaskConfig
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.tasks.salesforce import UpdateAdminProfile
from .util import create_task
ADMIN_PROFILE_BEFORE = """<?xml version='1.0' encoding='utf-8'?>
<Profile xmlns="http://soap.sforce.com/2006/04/metadata">
<applicationVisibilities>
<application>npsp__Nonprofit_CRM</application>
<default>true</default>
<visible>false</visible>
</applicationVisibilities>
<classAccess>
<apexClass>TestClass</apexClass>
<enabled>false</enabled>
</classAccess>
<fieldPermissions>
<field>Account.TestField__c</field>
<editable>false</editable>
<readable>false</readable>
</fieldPermissions>
<pageAccesses>
<apexPage>TestPage</apexPage>
<enabled>false</enabled>
</pageAccesses>
<recordTypeVisibilities>
<recordType>Account.Business_Account</recordType>
<default>true</default>
<personAccountDefault>true</personAccountDefault>
<visible>true</visible>
</recordTypeVisibilities>
<recordTypeVisibilities>
<recordType>Account.HH_Account</recordType>
<default>false</default>
<personAccountDefault>false</personAccountDefault>
<visible>false</visible>
</recordTypeVisibilities>
<tabVisibilities>
<tab>NPSP_Settings</tab>
<visibility>Hidden</visibility>
</tabVisibilities>
</Profile>"""
ADMIN_PROFILE_EXPECTED = """<?xml version='1.0' encoding='utf-8'?>
<Profile xmlns="http://soap.sforce.com/2006/04/metadata">
<applicationVisibilities>
<application>npsp__Nonprofit_CRM</application>
<default>true</default>
<visible>true</visible>
</applicationVisibilities>
<classAccess>
<apexClass>TestClass</apexClass>
<enabled>true</enabled>
</classAccess>
<fieldPermissions>
<field>Account.TestField__c</field>
<editable>true</editable>
<readable>true</readable>
</fieldPermissions>
<pageAccesses>
<apexPage>TestPage</apexPage>
<enabled>true</enabled>
</pageAccesses>
<recordTypeVisibilities>
<recordType>Account.Business_Account</recordType>
<default>false</default>
<personAccountDefault>false</personAccountDefault>
<visible>true</visible>
</recordTypeVisibilities>
<recordTypeVisibilities>
<recordType>Account.HH_Account</recordType>
<default>true</default>
<personAccountDefault>true</personAccountDefault>
<visible>true</visible>
</recordTypeVisibilities>
<tabVisibilities>
<tab>NPSP_Settings</tab>
<visibility>DefaultOn</visibility>
</tabVisibilities>
</Profile>"""
class TestUpdateAdminProfile(unittest.TestCase):
maxDiff = None
def test_run_task(self):
task = create_task(
UpdateAdminProfile,
{
"record_types": [
{
"record_type": "Account.HH_Account",
"default": True,
"person_account_default": True,
}
],
"namespaced_org": True,
},
)
def _retrieve_unpackaged():
profiles_path = os.path.join(task.tempdir, "profiles")
admin_profile_path = os.path.join(profiles_path, "Admin.profile")
os.mkdir(profiles_path)
with open(admin_profile_path, "w") as f:
f.write(ADMIN_PROFILE_BEFORE)
def _check_result():
with open(
os.path.join(task.tempdir, "profiles", "Admin.profile"), "r"
) as f:
result = f.read()
self.assertMultiLineEqual(ADMIN_PROFILE_EXPECTED, result)
task._retrieve_unpackaged = _retrieve_unpackaged
task._deploy_metadata = _check_result
task()
def test_run_task__record_type_not_found(self):
task = create_task(
UpdateAdminProfile,
{"record_types": [{"record_type": "DOESNT_EXIST"}], "namespaced_org": True},
)
def _retrieve_unpackaged():
profiles_path = os.path.join(task.tempdir, "profiles")
admin_profile_path = os.path.join(profiles_path, "Admin.profile")
os.mkdir(profiles_path)
with open(admin_profile_path, "w") as f:
f.write(ADMIN_PROFILE_BEFORE)
task._retrieve_unpackaged = _retrieve_unpackaged
with self.assertRaises(TaskOptionsError):
task()
@mock.patch("cumulusci.salesforce_api.metadata.ApiRetrieveUnpackaged.__call__")
def test_retrieve_unpackaged(self, ApiRetrieveUnpackaged):
task = create_task(UpdateAdminProfile)
task.tempdir = "/tmp"
task._retrieve_unpackaged()
ApiRetrieveUnpackaged.assert_called_once()
def test_deploy_metadata(self):
task = create_task(UpdateAdminProfile)
task.tempdir = "/tmp"
task._get_api = mock.Mock()
task._deploy_metadata()
task._get_api.assert_called_once()
``` |
{
"source": "1Harshini/empress",
"score": 3
} |
#### File: empress/empress/compression_utils.py
```python
def remove_empty_samples_and_features(table, sample_metadata, ordination=None):
"""Removes empty samples and features from the table and sample metadata.
This should be called *after* matching the table with the sample metadata
    and other input artifacts: we assume that the sample IDs of the table
    are equivalent to the indices of the sample metadata DataFrame.
Parameters
----------
table: biom.Table
Representation of a feature table.
sample_metadata: pd.DataFrame
Sample metadata. The index should describe sample IDs, and the columns
should describe sample metadata fields (e.g. "body site").
ordination: skbio.OrdinationResults, optional
Ordination information to show in Emperor alongside Empress. If this is
passed, this function will check to see if any of the empty samples
or features to be removed from the table are included in the
ordination; if so, this will raise an error (because these empty
items shouldn't be in the ordination in the first place).
Returns
-------
filtered_table: biom.Table
Copy of the input feature table with empty samples and features
removed.
filtered_sample_metadata: pd.DataFrame
Copy of the input sample metadata with empty samples removed.
Raises
------
ValueError
- If the input table is completely empty (i.e. all zeroes).
- If ordination is not None, and the ordination contains empty samples
or features.
References
----------
- Adapted from qurro._df_utils.remove_empty_samples_and_features().
"""
orig_tbl_samples = set(table.ids())
orig_tbl_features = set(table.ids(axis='observation'))
# this code is equivalent to the PR below, we should update once that gets
# merged and a newer BIOM release is publicly available
# https://github.com/biocore/biom-format/pull/847
filtered_table = table.copy()
for ax in {'observation', 'sample'}:
filtered_table = filtered_table.filter(
table.ids(axis=ax)[table.sum(axis=ax) > 0], axis=ax,
inplace=False)
if filtered_table.is_empty():
raise ValueError("All samples / features in matched table are empty.")
# Let user know about which samples/features may have been dropped, if any.
# Also, if we dropped any empty samples, update the sample metadata.
filtered_sample_metadata = sample_metadata
sample_diff = orig_tbl_samples - set(filtered_table.ids())
if sample_diff:
if ordination is not None:
empty_samples_in_ord = sample_diff & set(ordination.samples.index)
if empty_samples_in_ord:
raise ValueError(
(
"The ordination contains samples that are empty (i.e. "
"all 0s) in the table. Problematic sample IDs: {}"
).format(", ".join(sorted(empty_samples_in_ord)))
)
filtered_sample_metadata = filtered_sample_metadata.loc[
filtered_table.ids()
]
print("Removed {} empty sample(s).".format(len(sample_diff)))
feature_diff = orig_tbl_features - \
set(filtered_table.ids(axis='observation'))
if feature_diff:
if ordination is not None and ordination.features is not None:
empty_feats_in_ord = feature_diff & set(ordination.features.index)
if empty_feats_in_ord:
raise ValueError(
(
"The ordination contains features that are empty "
"(i.e. all 0s) in the table. Problematic feature IDs: "
"{}"
).format(", ".join(sorted(empty_feats_in_ord)))
)
print("Removed {} empty feature(s).".format(len(feature_diff)))
return filtered_table, filtered_sample_metadata
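# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original empress module): a tiny
# table with one all-zero sample ("s3") and one all-zero feature ("f2").
# Imports are local so the module's import-free style is preserved, and the
# code is wrapped in a function so it does not run on import.
def _example_remove_empty_samples_and_features():
    import numpy as np
    import pandas as pd
    from biom import Table

    tbl = Table(
        np.array([[1, 2, 0], [0, 0, 0]]),  # rows = features, columns = samples
        ["f1", "f2"],
        ["s1", "s2", "s3"],
    )
    sm = pd.DataFrame({"body site": ["gut", "skin", "gut"]},
                      index=["s1", "s2", "s3"])
    ftbl, fsm = remove_empty_samples_and_features(tbl, sm)
    # ftbl now only contains f1 x (s1, s2); fsm is reindexed to (s1, s2).
    return ftbl, fsm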
def compress_table(table):
"""Converts a feature table to a space-saving format.
Parameters
----------
table: biom.Table
Representation of a feature table. It is assumed that empty samples /
features have already been removed from the table.
Returns
-------
(s_ids, f_ids, s_ids_to_indices, f_ids_to_indices, compressed_table)
s_ids: list
List of the sample IDs in the table.
f_ids: list
List of the feature IDs in the table, analogous to s_ids.
s_ids_to_indices: dict
Inverse of s_ids: this maps sample IDs to their indices in s_ids.
"Indices" refers to a feature or sample's 0-based position in f_ids
or s_ids, respectively.
f_ids_to_indices: dict
Inverse of f_ids: this maps feature IDs to their indices in f_ids,
analogous to s_ids_to_indices.
compressed_table: list
Two-dimensional list. The "outer list" is of length len(s_ids).
Each position i within this outer list holds an "inner list" of
arbitrary (but within the range [1, len(f_ids)]) length.
The i-th inner list contains the feature indices of the
features present (i.e. at any abundance > 0) within the
sample with index i. Each inner list is sorted in ascending order.
References
----------
- Inspired by redbiom and Qurro's JSON data models.
"""
feature_ids = table.ids(axis='observation')
sample_ids = table.ids()
f_ids_to_indices = {fid: idx for idx, fid in enumerate(feature_ids)}
s_ids_to_indices = {sid: idx for idx, sid in enumerate(sample_ids)}
compressed_table = []
for vec in table.iter_data(axis='sample', dense=False):
compressed_table.append([int(i) for i in vec.indices])
return (
list(sample_ids), list(feature_ids), s_ids_to_indices,
f_ids_to_indices, compressed_table
)
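# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original empress module): shows
# the shape of compress_table()'s output for a 3-feature x 2-sample table.
# Wrapped in a function so it does not execute on import.
def _example_compress_table():
    import numpy as np
    from biom import Table

    tbl = Table(
        np.array([[1, 0], [0, 2], [3, 4]]),  # rows = features, columns = samples
        ["f1", "f2", "f3"],
        ["s1", "s2"],
    )
    s_ids, f_ids, s_idx, f_idx, ctbl = compress_table(tbl)
    # s_ids == ["s1", "s2"], f_ids == ["f1", "f2", "f3"]
    # s_idx == {"s1": 0, "s2": 1}, f_idx == {"f1": 0, "f2": 1, "f3": 2}
    # ctbl  == [[0, 2], [1, 2]]  (indices of the features present in each sample)
    return ctbl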
def compress_sample_metadata(s_ids_to_indices, metadata):
"""Converts a sample metadata DataFrame to a space-saving format.
We could ostensibly save more space by identifying repeated metadata
values and mapping *those* to integer IDs. (For example, a lot of Qiita
studies' sample metadata files have lots of frequently repeated values like
"host_subject_id", the various empo_* fields, etc.) However, that may be
1) overkill and 2) not worth it until we get to really big datasets
(and/or datasets with lots of repeated values).
Parameters
----------
s_ids_to_indices: dict
Maps sample IDs (strings) to 0-based indices in an existing list of
sample IDs. In practice, this should just be the "s_ids_to_indices"
output from compress_table().
metadata: pd.DataFrame
Sample metadata. The index should describe sample IDs, and the columns
should describe sample metadata fields (e.g. "body site").
The sample IDs in the index should match one-to-one with the keys in
s_ids_to_indices.
Returns
-------
(metadata_columns, metadata_vals)
metadata_columns: list
List of the sample metadata column names, all converted to strings.
metadata_vals: list
Two-dimensional list. The "outer list" is of length
len(s_ids_to_indices.keys()). Each position i within this outer
list holds an "inner list" of length len(metadata_columns).
The c-th value of the i-th inner list contains the c-th
sample metadata column (in metadata_columns)'s value for the
sample with index i, converted to a string.
Raises
------
ValueError
- If the metadata's index and the keys of s_ids_to_indices do not
contain the exact same elements.
- If the values of s_ids_to_indices are invalid: that is, if sorting
the values in ascending order does not produce a list of
          [0, 1, 2, 3, ..., len(s_ids_to_indices.keys()) - 1].
References
----------
- Inspired by redbiom and Qurro's JSON data models.
"""
sample_ids = s_ids_to_indices.keys()
# NOTE: I think that identically-named samples or metadata columns will
# break this check, but I also think that we can assume by this point that
# the data is at least that sane. (Checking that should be a responsibility
# for earlier in the program.)
if set(sample_ids) != set(metadata.index):
raise ValueError(
"The sample IDs in the metadata's index and s_ids_to_indices are "
"not identical."
)
if sorted(s_ids_to_indices.values()) != list(range(len(sample_ids))):
raise ValueError("Indices (values) of s_ids_to_indices are invalid.")
# Rename sample IDs to indices in the metadata
indexed_metadata = metadata.rename(index=s_ids_to_indices)
# Sort the metadata's rows by the sample indices
sorted_i_metadata = indexed_metadata.sort_index(
axis="index", ascending=True
)
# Convert all of the metadata values to strings
str_s_i_metadata = sorted_i_metadata.astype(str)
# Generate a 2-D list of metadata values
# Based on https://datatofish.com/convert-pandas-dataframe-to-list
sm_vals = str_s_i_metadata.values.tolist()
sm_cols = [str(c) for c in str_s_i_metadata.columns]
return sm_cols, sm_vals
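# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original empress module): the
# metadata rows come back ordered by sample *index*, not by the DataFrame's
# original row order, and every value is stringified.
def _example_compress_sample_metadata():
    import pandas as pd

    s_ids_to_indices = {"s1": 0, "s2": 1}
    metadata = pd.DataFrame(
        {"body site": ["gut", "skin"], "ph": [6.8, 5.5]},
        index=["s2", "s1"],  # deliberately out of order
    )
    cols, vals = compress_sample_metadata(s_ids_to_indices, metadata)
    # cols == ["body site", "ph"]
    # vals == [["skin", "5.5"], ["gut", "6.8"]]  (row i belongs to sample index i)
    return cols, vals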
def compress_feature_metadata(tip_metadata, int_metadata):
"""Converts tip/internal node metadata DataFrames to dicts to save space.
This is a pretty early optimization -- ideally we would use 2-D lists as
our final metadata structure, similar to the table / sample metadata
compression. This should be revisited when the tree data node-name
revamping has been merged in.
Parameters
----------
tip_metadata: pd.DataFrame or None
Metadata for tip nodes. If not None, the index should describe node
names, and the columns should describe feature metadata fields.
int_metadata: pd.DataFrame or None
Metadata for internal nodes. If not None, the index should describe
node names, and the columns should describe feature metadata fields.
Note that the columns of tip_metadata and int_metadata should be identical,
even if the feature metadata only describes tip or internal nodes. (In that
case, then the other feature metadata parameter should still be a DataFrame
-- albeit an empty one, with no feature names in its index.) The only case
in which the parameters should be None is if there was no feature metadata
at all.
Returns
-------
(metadata_columns, compressed_tip_metadata, compressed_int_metadata)
metadata_columns: list
List of the feature metadata column names, all converted to
            strings. If both input DFs are None, this will be [].
compressed_tip_metadata: dict
Maps node names in tip_metadata to a list of feature metadata
values, in the same order as in metadata_columns and converted to
strings. If tip_metadata was empty, or if both input DFs were None,
this will be {}.
compressed_int_metadata: dict
Maps node names in int_metadata to a list of feature metadata
values, in the same order as in metadata_columns and converted to
strings. If int_metadata was empty, or if both input DFs were None,
this will be {}.
Raises
------
ValueError
- If only one of tip_metadata and int_metadata is None.
- If the columns of tip_metadata are not identical to the columns of
int_metadata.
- If both the tip and internal node metadata DataFrames are empty.
References
----------
- Inspired by redbiom and Qurro's JSON data models.
"""
# If the user didn't pass in any feature metadata, we'll get to this block
if tip_metadata is None and int_metadata is None:
return [], {}, {}
# *This* should never happen. If it did, it's a sign that this function is
# being misused. (The ^ is a logical XOR; see
# https://stackoverflow.com/a/432844/10730311.)
if (tip_metadata is None) ^ (int_metadata is None):
raise ValueError(
"Only one of tip & int. node feature metadata is None."
)
# Verify that columns match up btwn. tip and internal node metadata
if not tip_metadata.columns.equals(int_metadata.columns):
raise ValueError("Tip & int. node feature metadata columns differ.")
# Verify that at least one feature metadata entry exists (since at this
# point we know that there should be at least *some* feature metadata)
if tip_metadata.empty and int_metadata.empty:
raise ValueError("Both tip & int. node feature metadata are empty.")
fm_cols = [str(c) for c in tip_metadata.columns]
# We want dicts mapping each feature ID to a list of the f.m. values for
# this feature ID. Since we're not mapping feature IDs to indices first,
# this is pretty simple to do with DataFrame.to_dict() using the
# orient="list" option -- however, orient="list" uses column-major order,
# so we transpose the metadata DFs before calling to_dict() in order to
# make sure our dicts are in row-major order (i.e. feature IDs are keys).
#
# (Also, while we're at it, we make sure that both DFs' values are all
# converted to strings.)
compressed_tm = tip_metadata.astype(str).T.to_dict(orient="list")
compressed_im = int_metadata.astype(str).T.to_dict(orient="list")
return fm_cols, compressed_tm, compressed_im
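# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original empress module): tip
# metadata only, paired with an empty-but-column-matched internal-node
# DataFrame, as the docstring above requires.
def _example_compress_feature_metadata():
    import pandas as pd

    tip_md = pd.DataFrame({"Level 1": ["k__Bacteria", "k__Archaea"]},
                          index=["a", "b"])
    int_md = pd.DataFrame(columns=["Level 1"])  # no internal-node metadata
    cols, tm, im = compress_feature_metadata(tip_md, int_md)
    # cols == ["Level 1"]
    # tm   == {"a": ["k__Bacteria"], "b": ["k__Archaea"]}
    # im   == {}
    return cols, tm, im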
```
#### File: empress/empress/core.py
```python
from empress.tree import validate_tree, bp_tree_tips
from empress.tools import (
match_inputs, match_tree_and_feature_metadata,
shifting, filter_feature_metadata_to_tree
)
from empress.compression_utils import (
remove_empty_samples_and_features, compress_table,
compress_sample_metadata, compress_feature_metadata
)
import pkg_resources
import os
import pandas as pd
from shutil import copytree
from emperor import Emperor
from jinja2 import Environment, FileSystemLoader
SUPPORT_FILES = pkg_resources.resource_filename('empress', 'support_files')
TEMPLATES = os.path.join(SUPPORT_FILES, 'templates')
EMPEROR_CALLBACK_PATH = os.path.join(SUPPORT_FILES, 'js',
'emperor-callbacks.js')
class Empress():
def __init__(self, tree, table=None, sample_metadata=None,
feature_metadata=None, ordination=None,
ignore_missing_samples=False, filter_extra_samples=False,
filter_missing_features=False, resource_path=None,
shear_to_table=True, shear_to_feature_metadata=False):
"""Visualize a phylogenetic tree
Use this object to interactively display a phylogenetic tree using the
Empress GUI.
Note that the table and sample metadata must either both be specified
or both be None. If only one of them is None, this will raise a
ValueError. If both are None, then the values of the ordination,
ignore_missing_samples, filter_extra_samples, filter_missing_features,
and shear_to_table arguments will be ignored since no sample
information is available.
Parameters
----------
tree: bp.BP
The phylogenetic tree to visualize.
table: biom.Table, optional
The matrix to visualize paired with the phylogenetic tree.
sample_metadata: pd.DataFrame, optional
DataFrame object with the metadata associated to the samples in the
``ordination`` object, should have an index set and it should match
the identifiers in the ``ordination`` object.
feature_metadata: pd.DataFrame, optional
DataFrame object with the metadata associated to the names of
tips and/or internal nodes in the ``tree`` object, should have an
index set and it should match at least one of these nodes' names.
ordination: skbio.OrdinationResults, optional
Object containing the computed values for an ordination method in
scikit-bio. Currently supports skbio.stats.ordination.PCoA and
skbio.stats.ordination.RDA results.
ignore_missing_samples: bool, optional (default False)
If True, pads missing samples (i.e. samples in the table but not
the metadata) with placeholder metadata. If False, raises a
DataMatchingError if any such samples exist. (Note that in either
case, samples in the metadata but not in the table are filtered
out; and if no samples are shared between the table and metadata, a
DataMatchingError is raised regardless.) This is analogous to the
ignore_missing_samples flag in Emperor.
filter_extra_samples: bool, optional (default False)
If True, ignores samples in the feature table that are not present
in the ordination. If False, raises a DataMatchingError if such
samples exist.
filter_missing_features: bool, optional (default False)
If True, filters features from the table that aren't present as
tips in the tree. If False, raises a DataMatchingError if any such
features exist. (Note that in either case, features in the tree but
not in the table are preserved.)
resource_path: str, optional
Load the resources from a user-specified remote location. If set to
None resources are loaded from the current directory.
shear_to_table: bool, optional
If True, shears the tree to just the tips that are present as
features in the feature table. Otherwise, the tree is not shorn.
Attributes
----------
tree:
Phylogenetic tree.
table:
Contingency matrix for the phylogeny.
samples:
Sample metadata.
features:
Feature metadata.
ordination:
Ordination matrix to visualize simultaneously with the tree.
base_url:
Base path to the remote resources.
"""
self.tree = tree
# Use XOR to verify that either both or neither of the table and
# sample metadata are None. Parens needed for precedence stuff.
if (table is None) ^ (sample_metadata is None):
# The caller messed something up, so raise an error.
# It should not be possible for the user to pass *just* one of
# these things (qiime empress community-plot requires both, and
# qiime empress tree-plot accepts neither).
raise ValueError(
"Both the table and sample metadata should be specified or "
"None. However, only one of them is None."
)
elif table is not None and sample_metadata is not None:
self.is_community_plot = True
else:
self.is_community_plot = False
self.table = table
if sample_metadata is not None:
self.samples = sample_metadata.copy()
else:
self.samples = None
if feature_metadata is not None:
# this will be transformed into self.tip_md and self.int_md in
# self._validate_and_match_data()
self.features = feature_metadata.copy()
else:
if shear_to_feature_metadata:
raise ValueError(
"Feature metadata must be provided in order to shear "
"to feature metadata."
)
self.features = None
self.ordination = ordination
self.is_empire_plot = (self.ordination is not None)
self.base_url = resource_path
if self.base_url is None:
self.base_url = 'support_files'
self._validate_and_match_data(
ignore_missing_samples,
filter_extra_samples,
filter_missing_features,
shear_to_table,
shear_to_feature_metadata,
)
if self.is_empire_plot:
# biplot arrows can optionally have metadata, think for example
# a study where the arrows represent pH, Alkalinity, etc.
            # Therefore, check if there are matches in the metadata; if
            # there aren't, additional errors can be overridden with the
            # ignore_missing_samples flag
feature_metadata = None
if self.ordination.features is not None:
# if there are no matches set to None so Emperor can ignore
# the feature metadata
if self.tip_md is None and self.int_md is None:
feature_metadata = pd.DataFrame()
else:
feature_metadata = pd.concat([self.tip_md, self.int_md])
arrows = self.ordination.features.index
if (feature_metadata.index.intersection(arrows).empty or
feature_metadata.empty):
feature_metadata = None
self._emperor = Emperor(
self.ordination, mapping_file=self.samples,
feature_mapping_file=feature_metadata,
ignore_missing_samples=ignore_missing_samples,
remote='./emperor-resources')
else:
self._emperor = None
def _validate_and_match_data(self, ignore_missing_samples,
filter_extra_samples,
filter_missing_features,
shear_to_table,
shear_to_feature_metadata):
if self.is_community_plot:
# Hack to unpack long tuples: https://stackoverflow.com/q/26036143
(
self.table, self.samples, self.tip_md, self.int_md,
self.tax_cols
) = match_inputs(
self.tree, self.table, self.samples, self.features,
self.ordination, ignore_missing_samples, filter_extra_samples,
filter_missing_features
)
# Remove empty samples and features from the table (and remove the
# removed samples from the sample metadata). We also pass in the
# ordination, if present, to this function -- so we can throw an
# error if the ordination actually contains these empty
# samples/features.
#
# We purposefully do this removal *after* matching (so we know the
# data inputs match up) and *before* shearing (so empty features
# in the table are no longer included as tips in the tree).
self.table, self.samples = remove_empty_samples_and_features(
self.table, self.samples, self.ordination
)
# remove unobserved features from the phylogeny (shear the tree)
if shear_to_table:
features = set(self.table.ids(axis='observation'))
self.tree = self.tree.shear(features)
# Remove features in the feature metadata that are no longer
# present in the tree, due to being shorn off
if self.tip_md is not None or self.int_md is not None:
# (Technically they should always both be None or both be
# DataFrames -- there's no in-between)
self.tip_md, self.int_md = filter_feature_metadata_to_tree(
self.tip_md, self.int_md, self.tree
)
else:
if shear_to_feature_metadata:
features = set(self.features.index)
all_tips = set(bp_tree_tips(self.tree))
# check that feature metadata contains at least 1 tip
if not features.intersection(all_tips):
raise ValueError(
"Cannot shear tree to feature metadata: no tips in "
"the tree are present in the feature metadata."
)
self.tree = self.tree.shear(features)
(
self.tip_md, self.int_md, self.tax_cols
) = match_tree_and_feature_metadata(self.tree, self.features)
validate_tree(self.tree)
def copy_support_files(self, target=None):
"""Copies the support files to a target directory
If an ordination is included Emperor's support files will also be
copied over (in a directory named emperor-resources).
Parameters
----------
target : str
The path where resources should be copied to. By default it copies
the files to ``self.base_url``.
"""
if target is None:
target = self.base_url
# copy the required resources
copytree(SUPPORT_FILES, os.path.join(target, 'support_files'))
if self._emperor is not None:
self._emperor.copy_support_files(os.path.join(target,
'emperor-resources'))
def __str__(self):
return self.make_empress()
def make_empress(self):
"""Build an empress plot
Returns
-------
str
Formatted empress plot.
Notes
-----
Once you generate the plot (and write it to a HTML file in a given
directory) you will need to copy the support files (the JS/CSS/etc.
code needed to view the visualization) to the same directory by calling
the ``copy_support_files`` method.
See Also
--------
empress.core.Empress.copy_support_files
"""
main_template = self._get_template()
# _process_data does a lot of munging to the coordinates data and
# to_dict puts the data into a dictionary-like object for consumption
data = self.to_dict()
plot = main_template.render(data)
return plot
def to_dict(self):
"""Convert processed data into a dictionary
Warning: the object returned by to_dict will contain references to
internal variables. Exercise caution if modifying the value of objects
returned by to_dict.
Returns
-------
dict
A dictionary describing the plots contained in the ordination
object and the sample + feature metadata.
"""
s_ids = f_ids = cmp_table = sm_cols = compressed_sm = None
sid2idxs = fid2idxs = {}
if self.is_community_plot:
# The fid2idxs dict we get from compress_table() is temporary --
# later, we'll restructure it so that the keys (feature IDs) are
# nodes' postorder positions in the tree rather than arbitrary
# unique integers. (TODO: it should be possible to speed this up by
# passing the tree to compress_table() so postorder positions can
# immediately be used as keys / feature IDs without an intermediate
# step.)
s_ids, f_ids, sid2idxs, fid2idxs_t, cmp_table = compress_table(
self.table
)
sm_cols, compressed_sm = compress_sample_metadata(
sid2idxs, self.samples
)
fm_cols, compressed_tm_tmp, compressed_im_tmp = \
compress_feature_metadata(self.tip_md, self.int_md)
# Use nodes' postorder positions as their "IDs" for the BIOM table and
# feature metadata
compressed_tm = {}
compressed_im = {}
# bptree indices start at one, hence we pad the arrays
names = [-1]
lengths = [-1]
for i in range(1, len(self.tree) + 1):
node = self.tree.postorderselect(i)
name = self.tree.name(node)
names.append(name)
lengths.append(self.tree.length(node))
if self.is_community_plot and name in fid2idxs_t:
fid2idxs[i] = fid2idxs_t[name]
f_ids[fid2idxs[i]] = i
if name in compressed_tm_tmp:
compressed_tm[i] = compressed_tm_tmp[name]
# Note: for internal metadata, node names may not be unique. Thus,
# we duplicate the internal node metadata for each node in the
# metadata with the same name.
if name in compressed_im_tmp:
compressed_im[i] = compressed_im_tmp[name]
data_to_render = {
'base_url': self.base_url,
# tree info
'tree': shifting(self.tree.B),
'lengths': lengths,
'names': names,
# Should we show sample metadata coloring / animation panels?
'is_community_plot': self.is_community_plot,
# Are we working with an EMPire plot?
'is_empire_plot': self.is_empire_plot,
# feature table
's_ids': s_ids,
'f_ids': f_ids,
's_ids_to_indices': sid2idxs,
'f_ids_to_indices': fid2idxs,
'compressed_table': cmp_table,
# sample metadata
'sample_metadata_columns': sm_cols,
'compressed_sample_metadata': compressed_sm,
# feature metadata
'feature_metadata_columns': fm_cols,
'split_taxonomy_columns': self.tax_cols,
'compressed_tip_metadata': compressed_tm,
'compressed_int_metadata': compressed_im,
# Emperor integration
'emperor_div': '',
'emperor_require_logic': '',
'emperor_style': '',
'emperor_base_dependencies': '',
'emperor_classes': ''
}
if self._emperor is not None:
data_to_render.update(self._scavenge_emperor())
return data_to_render
def _get_template(self, standalone=False):
"""Get the jinja template object
Parameters
----------
standalone: bool, optional
Whether or not the generated plot will load resources locally
(``True``), or from a specified URL (``False``).
Returns
-------
jinja2.Template
Template where the plot is created.
"""
# based on: http://stackoverflow.com/a/6196098
env = Environment(loader=FileSystemLoader(TEMPLATES))
return env.get_template('empress-template.html')
def _scavenge_emperor(self):
self._emperor.width = '50vw'
self._emperor.height = '100vh; float: right'
# make the background white so it matches Empress
self._emperor.set_background_color('white')
self._emperor.set_axes(color='black')
        # The following line references will be replaced with API calls to the
        # Emperor object; however, those are not implemented yet
emperor_base_dependencies = self._emperor.render_base_dependencies()
style = self._emperor.render_style()
# main divs for emperor
emperor_div = self._emperor.render_html('emperor-in-empire')
# main js script for emperor, including additional callbacks
with open(EMPEROR_CALLBACK_PATH) as f:
self._emperor.js_on_ready = f.read()
emperor_require_logic = self._emperor.render_js('emperor-in-empire')
emperor_data = {
'emperor_div': emperor_div,
'emperor_require_logic': emperor_require_logic,
'emperor_style': style,
'emperor_base_dependencies': emperor_base_dependencies,
'emperor_classes': 'combined-plot-container'
}
return emperor_data
```
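The `make_empress` / `copy_support_files` docstrings above describe a two-step export: render the HTML, then copy the JS/CSS resources next to it. Below is a hypothetical end-to-end sketch of the tree-only path (no table or sample metadata), assuming the `bp` package's `parse_newick` and an output directory that does not already exist.
```python
import os
from bp import parse_newick
from empress.core import Empress

tree = parse_newick("((a:1,b:2)c:3,d:4)root;")
viz = Empress(tree)  # no table / sample metadata -> standalone tree plot

out_dir = "empress-plot"
os.makedirs(out_dir)
with open(os.path.join(out_dir, "empress.html"), "w") as f:
    f.write(viz.make_empress())
# The support files must sit next to the HTML for the visualization to load.
viz.copy_support_files(out_dir)
```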
#### File: tests/python/test_cli.py
```python
import os
import unittest
from click.testing import CliRunner
import pandas as pd
from empress.scripts._cli import empress
from .util import extract_q2_artifact_to_path
def files_present(output_dir):
files = os.listdir(output_dir)
assert "empress.html" in files
assert os.path.isdir(f"{output_dir}/support_files")
class TestCLI(unittest.TestCase):
@classmethod
def setUpClass(cls):
q2_tree_loc = os.path.abspath("docs/moving-pictures/rooted-tree.qza")
q2_table_loc = os.path.abspath("docs/moving-pictures/table.qza")
q2_fm_loc = os.path.abspath("docs/moving-pictures/taxonomy.qza")
q2_pcoa_loc = os.path.abspath("docs/moving-pictures/biplot.qza")
q2_sm_loc = os.path.abspath(
"docs/moving-pictures/sample_metadata.tsv"
)
# create isolated filesystem for tests in this file
# manually using __enter__ so that we can run all tests and close in
# tearDown rather than use 'with runner.isolated_filesystem():'
cls.runner = CliRunner()
cls.iso_fs = cls.runner.isolated_filesystem()
cls.iso_fs.__enter__()
# extract Artifacts to temporary filesystem
cls.tree_loc = extract_q2_artifact_to_path("tree", q2_tree_loc,
"tree.nwk")
cls.table_loc = extract_q2_artifact_to_path("tbl", q2_table_loc,
"feature-table.biom")
cls.fm_loc = extract_q2_artifact_to_path("fm", q2_fm_loc,
"taxonomy.tsv")
cls.pcoa_loc = extract_q2_artifact_to_path("pcoa", q2_pcoa_loc,
"ordination.txt")
# need to re-save sample metadata to remove q2:types row
cls.sm_loc = "tmp_sample_metadata.tsv"
pd.read_csv(q2_sm_loc, sep="\t", index_col=0, skiprows=[1]).to_csv(
cls.sm_loc,
sep="\t",
index=True
)
@classmethod
def tearDownClass(cls):
# https://stackoverflow.com/questions/51706836/manually-open-context-manager
cls.iso_fs.__exit__(None, None, None)
def test_tree_plot_basic(cls):
output_dir = "tree_plot_basic"
result = cls.runner.invoke(
empress,
["tree-plot", "--tree", cls.tree_loc, "--output-dir", output_dir]
)
assert result.exit_code == 0
files_present(output_dir)
def test_comm_plot_basic(cls):
output_dir = "comm_plot_basic"
result = cls.runner.invoke(
empress,
["community-plot", "--tree", cls.tree_loc, "--table",
cls.table_loc, "--sample-metadata", cls.sm_loc,
"--output-dir", output_dir]
)
assert result.exit_code == 0
files_present(output_dir)
def test_comm_plot_pcoa(cls):
output_dir = "comm_plot_pcoa"
result = cls.runner.invoke(
empress,
["community-plot", "--tree", cls.tree_loc, "--table",
cls.table_loc, "--sample-metadata", cls.sm_loc,
"--output-dir", output_dir, "--pcoa", cls.pcoa_loc,
"--filter-extra-samples", "--feature-metadata", cls.fm_loc]
)
assert result.exit_code == 0
files_present(output_dir)
assert os.path.isdir(f"{output_dir}/emperor-resources")
def test_existing_directory(cls):
output_dir = "existing_dir"
os.mkdir("existing_dir")
result = cls.runner.invoke(
empress,
["community-plot", "--tree", cls.tree_loc, "--table",
cls.table_loc, "--sample-metadata", cls.sm_loc,
"--output-dir", output_dir]
)
assert result.exit_code == 1
error_class, value, _ = result.exc_info
assert error_class == OSError
assert str(value) == "Output directory already exists!"
assert not os.path.isdir(f"{output_dir}/support_files")
assert "empress.html" not in os.listdir(output_dir)
def test_tree_plot_basic_cli_abbrev(cls):
output_dir = "tree_plot_basic_cli_abbrev"
result = cls.runner.invoke(
empress,
["tree-plot", "-t", cls.tree_loc, "-o", output_dir]
)
assert result.exit_code == 0
files_present(output_dir)
``` |
{
"source": "1heart/differentiable-robot-model",
"score": 2
} |
#### File: differentiable-robot-model/tests/test_kinematics_dynamics.py
```python
import os
import random
from dataclasses import dataclass
import torch
import numpy as np
import pytest
import pybullet as p
import diff_robot_data
from differentiable_robot_model.differentiable_robot_model import (
DifferentiableRobotModel,
LearnableRigidBodyConfig,
)
# (rel_urdf_path, test_link_list)
test_data = [
# Toy
("2link_robot.urdf", [(2, "endEffector")]),
# Kuka iiwa
("kuka_iiwa/urdf/iiwa7.urdf", [(7, "iiwa_link_ee")]),
# Franka_panda
("panda_description/urdf/panda_no_gripper.urdf", [(7, "panda_virtual_ee_link")]),
# Allegro hand
(
"allegro/urdf/allegro_hand_description_left.urdf",
[
(4, "link_11.0_tip"),
(9, "link_7.0_tip"),
(14, "link_3.0_tip"),
(19, "link_15.0_tip"),
],
),
]
################
# Dataclasses
################
@dataclass
class MetaTestInfo:
urdf_path: str
link_list: list
zero_vel: bool
zero_acc: bool
@dataclass
class PybulletInstance:
pc_id: int
robot_id: int
num_joints: int
@dataclass
class SampledTestCase:
joint_pos: list
joint_vel: list
joint_acc: list
################
# Arrange
################
@pytest.fixture(params=test_data)
def test_info(request):
rel_urdf_path = request.param[0]
robot_description_folder = diff_robot_data.__path__[0]
urdf_path = os.path.join(robot_description_folder, rel_urdf_path)
return MetaTestInfo(
urdf_path=urdf_path, link_list=request.param[1], zero_vel=False, zero_acc=False
)
# Setup pybullet
@pytest.fixture
def sim(test_info):
pc_id = p.connect(p.DIRECT)
robot_id = p.loadURDF(
test_info.urdf_path,
basePosition=[0, 0, 0],
useFixedBase=True,
flags=p.URDF_USE_INERTIA_FROM_FILE,
physicsClientId=pc_id,
)
p.setGravity(0, 0, -9.81, physicsClientId=pc_id)
num_joints = p.getNumJoints(robot_id, physicsClientId=pc_id)
return PybulletInstance(
pc_id=pc_id,
robot_id=robot_id,
num_joints=num_joints,
)
# Setup differentiable robot model
@pytest.fixture
def robot_model(test_info):
return DifferentiableRobotModel(test_info.urdf_path, LearnableRigidBodyConfig())
# Setup test
@pytest.fixture
def setup_dict(request, test_info, sim, robot_model):
# Get num dofs
num_dofs = len(robot_model.get_joint_limits())
# Update pybullet joint damping
for link_idx in range(sim.num_joints):
joint_damping = robot_model._bodies[link_idx + 1].get_joint_damping_const()
p.changeDynamics(
sim.robot_id,
link_idx,
linearDamping=0.0,
angularDamping=0.0,
jointDamping=joint_damping,
physicsClientId=sim.pc_id,
)
p.changeDynamics(
sim.robot_id, link_idx, maxJointVelocity=200, physicsClientId=sim.pc_id
)
# Set all seeds to ensure reproducibility
random.seed(0)
np.random.seed(1)
torch.manual_seed(0)
# Sample test cases
limits_per_joint = robot_model.get_joint_limits()
joint_lower_bounds = [joint["lower"] for joint in limits_per_joint]
joint_upper_bounds = [joint["upper"] for joint in limits_per_joint]
joint_velocity_limits = [0.01 * joint["velocity"] for joint in limits_per_joint]
    # NOTE: sample low velocities, since PyBullet exhibits some unexplained clipping for large damping forces
    # (encountered with the allegro hand urdf)
joint_angles = []
joint_velocities = []
joint_accelerations = []
for i in range(len(limits_per_joint)):
joint_angles.append(
np.random.uniform(low=joint_lower_bounds[i], high=joint_upper_bounds[i])
)
if test_info.zero_vel:
joint_velocities.append(0.0)
else:
joint_velocities.append(
np.random.uniform(
low=-joint_velocity_limits[i], high=joint_velocity_limits[i]
)
)
if test_info.zero_acc:
joint_accelerations.append(0.0)
else:
joint_accelerations.append(
np.random.uniform(
low=-joint_velocity_limits[i] * 2.0,
high=joint_velocity_limits[i] * 2.0,
)
)
return {
"robot_model": robot_model,
"sim": sim,
"num_dofs": num_dofs,
"test_case": SampledTestCase(
joint_pos=joint_angles,
joint_vel=joint_velocities,
joint_acc=joint_accelerations,
),
}
################
# Act
################
# Helper functions
def extract_setup_dict(setup_dict):
robot_model = setup_dict["robot_model"]
sim = setup_dict["sim"]
num_dofs = setup_dict["num_dofs"]
test_case = setup_dict["test_case"]
return robot_model, sim, num_dofs, test_case
def set_pybullet_state(sim, robot_model, num_dofs, angles, velocities):
for i in range(num_dofs):
j_idx = (
robot_model._controlled_joints[i] - 1
) # pybullet link idx starts at -1 for base link
p.resetJointState(
bodyUniqueId=sim.robot_id,
jointIndex=j_idx,
targetValue=angles[i],
targetVelocity=velocities[i],
physicsClientId=sim.pc_id,
)
# Main test class
class TestRobotModel:
def test_end_effector_state(self, request, setup_dict, test_info):
robot_model, sim, num_dofs, test_case = extract_setup_dict(setup_dict)
for ee_link_idx, ee_link_name in test_info.link_list:
# Bullet sim
set_pybullet_state(
sim, robot_model, num_dofs, test_case.joint_pos, test_case.joint_vel
)
bullet_ee_state = p.getLinkState(
sim.robot_id, ee_link_idx, physicsClientId=sim.pc_id
)
# Differentiable model
model_ee_state = robot_model.compute_forward_kinematics(
torch.Tensor(test_case.joint_pos).reshape(1, num_dofs), ee_link_name
)
# Compare
assert np.allclose(
model_ee_state[0].detach().numpy(),
np.asarray(bullet_ee_state[0]),
atol=1e-7,
)
assert np.allclose(
model_ee_state[1].detach().numpy(),
np.asarray(bullet_ee_state[1]),
atol=1e-7,
)
def test_ee_jacobian(self, request, setup_dict, test_info):
robot_model, sim, num_dofs, test_case = extract_setup_dict(setup_dict)
for ee_link_idx, ee_link_name in test_info.link_list:
# Bullet sim
set_pybullet_state(
sim, robot_model, num_dofs, test_case.joint_pos, test_case.joint_vel
)
bullet_jac_lin, bullet_jac_ang = p.calculateJacobian(
bodyUniqueId=sim.robot_id,
linkIndex=ee_link_idx,
localPosition=[0, 0, 0],
objPositions=test_case.joint_pos,
objVelocities=test_case.joint_vel,
objAccelerations=[0] * num_dofs,
physicsClientId=sim.pc_id,
)
# Differentiable model
model_jac_lin, model_jac_ang = robot_model.compute_endeffector_jacobian(
torch.Tensor(test_case.joint_pos).reshape(1, num_dofs), ee_link_name
)
# Compare
assert np.allclose(
model_jac_lin.detach().numpy(), np.asarray(bullet_jac_lin), atol=1e-7
)
assert np.allclose(
model_jac_ang.detach().numpy(), np.asarray(bullet_jac_ang), atol=1e-7
)
@pytest.mark.parametrize("use_damping", [True, False])
def test_inverse_dynamics(self, request, setup_dict, use_damping):
robot_model, sim, num_dofs, test_case = extract_setup_dict(setup_dict)
# Bullet sim
set_pybullet_state(
sim, robot_model, num_dofs, test_case.joint_pos, test_case.joint_vel
)
bullet_torques = p.calculateInverseDynamics(
sim.robot_id,
test_case.joint_pos,
test_case.joint_vel,
test_case.joint_acc,
physicsClientId=sim.pc_id,
)
# Differentiable model
model_torques = robot_model.compute_inverse_dynamics(
torch.Tensor(test_case.joint_pos).reshape(1, num_dofs),
torch.Tensor(test_case.joint_vel).reshape(1, num_dofs),
torch.Tensor(test_case.joint_acc).reshape(1, num_dofs),
include_gravity=True,
use_damping=use_damping,
)
if use_damping:
# if we have non-zero joint damping, we'll have to subtract the damping term from our predicted torques,
# because pybullet does not include damping/viscous friction in their inverse dynamics call
damping_const = torch.zeros(1, num_dofs)
qd = torch.Tensor(test_case.joint_vel).reshape(1, num_dofs)
for i in range(robot_model._n_dofs):
idx = robot_model._controlled_joints[i]
damping_const[:, i] = robot_model._bodies[idx].get_joint_damping_const()
damping_term = damping_const.repeat(1, 1) * qd
model_torques -= damping_term
# Compare
assert np.allclose(
model_torques.detach().squeeze().numpy(),
np.asarray(bullet_torques),
atol=1e-7,
)
def test_mass_computation(self, request, setup_dict):
robot_model, sim, num_dofs, test_case = extract_setup_dict(setup_dict)
# Bullet sim
set_pybullet_state(
sim, robot_model, num_dofs, test_case.joint_pos, test_case.joint_vel
)
bullet_mass = np.array(
p.calculateMassMatrix(
sim.robot_id, test_case.joint_pos, physicsClientId=sim.pc_id
)
)
# Differentiable model
inertia_mat = robot_model.compute_lagrangian_inertia_matrix(
torch.Tensor(test_case.joint_pos).reshape(1, num_dofs)
)
# Compare
assert np.allclose(
inertia_mat.detach().squeeze().numpy(), bullet_mass, atol=1e-7
)
@pytest.mark.parametrize("use_damping", [True, False])
def test_forward_dynamics(self, request, setup_dict, use_damping):
robot_model, sim, num_dofs, test_case = extract_setup_dict(setup_dict)
# Bullet sim
dt = 1.0 / 240.0
controlled_joints = [i - 1 for i in robot_model._controlled_joints]
if not use_damping: # update joint damping
for link_idx in range(sim.num_joints):
p.changeDynamics(
sim.robot_id,
link_idx,
linearDamping=0.0,
angularDamping=0.0,
jointDamping=0.0,
physicsClientId=sim.pc_id,
)
p.setJointMotorControlArray( # activating torque control
bodyIndex=sim.robot_id,
jointIndices=controlled_joints,
controlMode=p.VELOCITY_CONTROL,
forces=np.zeros(num_dofs),
physicsClientId=sim.pc_id,
)
set_pybullet_state(
sim, robot_model, num_dofs, test_case.joint_pos, test_case.joint_vel
)
bullet_tau = (
np.array( # torque that achieves test_case.joint_acc from current state
p.calculateInverseDynamics(
sim.robot_id,
test_case.joint_pos,
test_case.joint_vel,
test_case.joint_acc,
physicsClientId=sim.pc_id,
)
)
)
p.setJointMotorControlArray(
bodyIndex=sim.robot_id,
jointIndices=controlled_joints,
controlMode=p.TORQUE_CONTROL,
forces=bullet_tau,
physicsClientId=sim.pc_id,
)
p.stepSimulation(physicsClientId=sim.pc_id)
cur_joint_states = p.getJointStates(
sim.robot_id, controlled_joints, physicsClientId=sim.pc_id
)
q = [cur_joint_states[i][0] for i in range(num_dofs)]
qd = [cur_joint_states[i][1] for i in range(num_dofs)]
qdd = (np.array(qd) - np.array(test_case.joint_vel)) / dt
# Differentiable model
model_qdd = robot_model.compute_forward_dynamics(
torch.Tensor(test_case.joint_pos).reshape(1, num_dofs),
torch.Tensor(test_case.joint_vel).reshape(1, num_dofs),
torch.Tensor(bullet_tau).reshape(1, num_dofs),
include_gravity=True,
use_damping=use_damping,
)
# Compare
model_qdd = np.asarray(model_qdd.detach().squeeze())
assert np.allclose(model_qdd, qdd, atol=1e-7)
if not use_damping:
# we can only test this if joint damping is zero,
# if it is non-zero the pybullet forward dynamics and inverse dynamics call will not be exactly the
# "inverse" of each other
assert np.allclose(model_qdd, np.asarray(test_case.joint_acc), atol=1e-7)
``` |
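Outside of pytest, the same differentiable-robot-model calls that these tests compare against PyBullet can be used directly. A minimal sketch, assuming the bundled Kuka iiwa URDF that the parametrization above points at:
```python
import os
import torch
import diff_robot_data
from differentiable_robot_model.differentiable_robot_model import (
    DifferentiableRobotModel,
    LearnableRigidBodyConfig,
)

# Build the model from the packaged URDF, mirroring the test fixtures.
urdf = os.path.join(diff_robot_data.__path__[0], "kuka_iiwa/urdf/iiwa7.urdf")
robot = DifferentiableRobotModel(urdf, LearnableRigidBodyConfig())

n_dofs = len(robot.get_joint_limits())
q = torch.zeros(1, n_dofs)  # batch of one joint configuration
ee_state = robot.compute_forward_kinematics(q, "iiwa_link_ee")
print(ee_state[0], ee_state[1])  # end-effector position and orientation
```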
{
"source": "1heart/fairo",
"score": 2
} |
#### File: agents/locobot/locobot_agent.py
```python
import os
import subprocess
import time
import signal
import random
import logging
import faulthandler
from multiprocessing import set_start_method
import shutil
from droidlet import dashboard
if __name__ == "__main__":
# this line has to go before any imports that contain @sio.on functions
# or else, those @sio.on calls become no-ops
dashboard.start()
from droidlet.dialog.dialogue_manager import DialogueManager
from droidlet.dialog.map_to_dialogue_object import DialogueObjectMapper
from droidlet.base_util import to_player_struct, Pos, Look, Player
from droidlet.memory.memory_nodes import PlayerNode
from droidlet.perception.semantic_parsing.nsp_querier import NSPQuerier
from agents.droidlet_agent import DroidletAgent
from agents.argument_parser import ArgumentParser
import agents.locobot.label_prop as LP
from droidlet.memory.robot.loco_memory import LocoAgentMemory, DetectedObjectNode
from droidlet.perception.robot import Perception
from droidlet.perception.semantic_parsing.utils.interaction_logger import InteractionLogger
from self_perception import SelfPerception
from droidlet.interpreter.robot import (
dance,
default_behaviors,
LocoGetMemoryHandler,
PutMemoryHandler,
LocoInterpreter,
)
from droidlet.dialog.robot import LocoBotCapabilities
import droidlet.lowlevel.locobot.rotation as rotation
from droidlet.lowlevel.locobot.locobot_mover import LoCoBotMover
from droidlet.event import sio
faulthandler.register(signal.SIGUSR1)
random.seed(0)
log_formatter = logging.Formatter(
"%(asctime)s [%(filename)s:%(lineno)s - %(funcName)s() %(levelname)s]: %(message)s"
)
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger().handlers.clear()
class LocobotAgent(DroidletAgent):
"""Implements an instantiation of the LocoMCAgent on a Locobot. It starts
off the agent processes including launching the dashboard.
Args:
opts (argparse.Namespace): opts returned by the ArgumentParser with defaults set
that you can override.
name (string, optional): a name for your agent (default: Locobot)
Example:
>>> python locobot_agent.py --backend 'locobot'
"""
coordinate_transforms = rotation
def __init__(self, opts, name="Locobot"):
super(LocobotAgent, self).__init__(opts)
logging.info("LocobotAgent.__init__ started")
self.agent_type = "locobot"
self.opts = opts
self.entityId = 0
self.no_default_behavior = opts.no_default_behavior
self.last_chat_time = -1000000000000
self.name = name
self.player = Player(100, name, Pos(0, 0, 0), Look(0, 0))
self.pos = Pos(0, 0, 0)
self.uncaught_error_count = 0
self.last_task_memid = None
self.point_targets = []
self.init_event_handlers()
# list of (prob, default function) pairs
self.visible_defaults = [(1.0, default_behaviors.explore)]
self.interaction_logger = InteractionLogger()
if os.path.exists("annotation_data/rgb"):
shutil.rmtree("annotation_data/rgb")
if os.path.exists("annotation_data/seg"):
shutil.rmtree("annotation_data/seg")
def init_event_handlers(self):
super().init_event_handlers()
@sio.on("movement command")
def test_command(sid, commands, movement_values={}):
if len(movement_values) == 0:
movement_values["yaw"] = 0.01
movement_values["velocity"] = 0.1
movement = [0.0, 0.0, 0.0]
for command in commands:
if command == "MOVE_FORWARD":
movement[0] += movement_values["velocity"]
print("action: FORWARD")
elif command == "MOVE_BACKWARD":
movement[0] -= movement_values["velocity"]
print("action: BACKWARD")
elif command == "MOVE_LEFT":
movement[2] += movement_values["yaw"]
print("action: LEFT")
elif command == "MOVE_RIGHT":
movement[2] -= movement_values["yaw"]
print("action: RIGHT")
elif command == "PAN_LEFT":
self.mover.bot.set_pan(self.mover.bot.get_pan() + 0.08)
elif command == "PAN_RIGHT":
self.mover.bot.set_pan(self.mover.bot.get_pan() - 0.08)
elif command == "TILT_UP":
self.mover.bot.set_tilt(self.mover.bot.get_tilt() - 0.08)
elif command == "TILT_DOWN":
self.mover.bot.set_tilt(self.mover.bot.get_tilt() + 0.08)
self.mover.move_relative([movement])
@sio.on("shutdown")
def _shutdown(sid, data):
self.shutdown()
@sio.on("get_memory_objects")
def objects_in_memory(sid):
objects = DetectedObjectNode.get_all(self.memory)
for o in objects:
del o["feature_repr"] # pickling optimization
self.dashboard_memory["objects"] = objects
sio.emit("updateState", {"memory": self.dashboard_memory})
@sio.on("interaction data")
def log_interaction_data(sid, interactionData):
self.interaction_logger.logInteraction(interactionData)
# Returns an array of objects with updated masks
@sio.on("label_propagation")
def label_propagation(sid, postData):
objects = LP.label_propagation(postData)
sio.emit("labelPropagationReturn", objects)
@sio.on("save_rgb_seg")
def save_rgb_seg(sid, postData):
LP.save_rgb_seg(postData)
if "callback" in postData and postData["callback"]:
sio.emit("saveRgbSegCallback")
@sio.on("save_annotations")
def save_annotations(sid, categories):
LP.save_annotations(categories)
@sio.on("save_categories_properties")
def save_categories_properties(sid, categories, properties):
LP.save_categories_properties(categories, properties)
@sio.on("retrain_detector")
def retrain_detector(sid, settings={}):
inference_json = LP.retrain_detector(settings)
sio.emit("annotationRetrain", inference_json)
@sio.on("switch_detector")
def switch_detector(sid):
model_dir = "annotation_data/model"
model_names = os.listdir(model_dir)
model_nums = list(map(lambda x: int(x.split("v")[1]), model_names))
last_model_num = max(model_nums)
model_path = os.path.join(model_dir, "v" + str(last_model_num))
detector_weights = "model_999.pth"
properties_file = "props.json"
things_file = "things.json"
files = os.listdir(model_path)
if detector_weights not in files:
print(
"Error switching model:",
os.path.join(model_path, detector_weights),
"not found",
)
return
if properties_file not in files:
print(
"Error switching model:",
os.path.join(model_path, properties_file),
"not found",
)
return
if things_file not in files:
print("Error switching model:", os.path.join(model_path, things_file), "not found")
return
print("switching to", model_path)
self.perception_modules["vision"] = Perception(model_path, default_keypoints_path=True)
def init_memory(self):
"""Instantiates memory for the agent.
Uses the DB_FILE environment variable to write the memory to a
file or saves it in-memory otherwise.
"""
self.memory = LocoAgentMemory(
db_file=os.environ.get("DB_FILE", ":memory:"),
db_log_path=None,
coordinate_transforms=self.coordinate_transforms,
)
dance.add_default_dances(self.memory)
logging.info("Initialized agent memory")
def init_perception(self):
"""Instantiates all perceptual modules.
Each perceptual module should have a perceive method that is
called by the base agent event loop.
"""
if not hasattr(self, "perception_modules"):
self.perception_modules = {}
self.perception_modules["language_understanding"] = NSPQuerier(self.opts, self)
self.perception_modules["self"] = SelfPerception(self)
self.perception_modules["vision"] = Perception(self.opts.perception_model_dir)
def perceive(self, force=False):
# 1. perceive from NLU parser
super().perceive(force=force)
# 2. perceive from robot perception modules
self.perception_modules["self"].perceive(force=force)
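        # Grab an RGB-D frame and the base pose in canonical coordinates, stream them to
        # the dashboard map view, then run the vision module against previously detected
        # objects so existing detections can be updated rather than duplicated.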
rgb_depth = self.mover.get_rgb_depth()
xyz = self.mover.get_base_pos_in_canonical_coords()
x, y, yaw = xyz
sio.emit(
"map",
{"x": x, "y": y, "yaw": yaw, "map": self.mover.get_obstacles_in_canonical_coords()},
)
previous_objects = DetectedObjectNode.get_all(self.memory)
# perception_output is a namedtuple of : new_detections, updated_detections, humans
perception_output = self.perception_modules["vision"].perceive(rgb_depth,
xyz,
previous_objects,
force=force)
self.memory.update(perception_output)
def init_controller(self):
"""Instantiates controllers - the components that convert a text chat to task(s)."""
dialogue_object_classes = {}
dialogue_object_classes["bot_capabilities"] = {"task": LocoBotCapabilities, "data": {}}
dialogue_object_classes["interpreter"] = LocoInterpreter
dialogue_object_classes["get_memory"] = LocoGetMemoryHandler
dialogue_object_classes["put_memory"] = PutMemoryHandler
self.dialogue_manager = DialogueManager(
memory=self.memory,
dialogue_object_classes=dialogue_object_classes,
dialogue_object_mapper=DialogueObjectMapper,
opts=self.opts,
)
def init_physical_interfaces(self):
"""Instantiates the interface to physically move the robot."""
self.mover = LoCoBotMover(ip=self.opts.ip, backend=self.opts.backend)
def get_player_struct_by_name(self, speaker_name):
p = self.memory.get_player_by_name(speaker_name)
if p:
return p.get_struct()
else:
return None
def get_other_players(self):
return [self.player]
def get_incoming_chats(self):
all_chats = []
speaker_name = "dashboard"
if self.dashboard_chat is not None:
if not self.memory.get_player_by_name(speaker_name):
PlayerNode.create(
self.memory,
to_player_struct((None, None, None), None, None, None, speaker_name),
)
all_chats.append(self.dashboard_chat)
self.dashboard_chat = None
return all_chats
    # FIXME!!!!
def send_chat(self, chat: str):
logging.info("Sending chat: {}".format(chat))
# Send the socket event to show this reply on dashboard
sio.emit("showAssistantReply", {"agent_reply": "Agent: {}".format(chat)})
self.memory.add_chat(self.memory.self_memid, chat)
# actually send the chat, FIXME FOR HACKATHON
# return self._cpp_send_chat(chat)
def step(self):
super().step()
time.sleep(0)
def task_step(self, sleep_time=0.0):
super().task_step(sleep_time=sleep_time)
def shutdown(self):
self._shutdown = True
try:
self.perception_modules["vision"].vprocess_shutdown.set()
        except Exception:
            # the try/except is there in the event that
            # self.perception_modules["vision"] has either:
            # 1. not been fully started yet
            # 2. already crashed / shutdown due to other effects
            pass
time.sleep(5) # let the other threads die
os._exit(0) # TODO: remove and figure out why multiprocess sometimes hangs on exit
if __name__ == "__main__":
base_path = os.path.dirname(__file__)
parser = ArgumentParser("Locobot", base_path)
opts = parser.parse()
logging.basicConfig(level=opts.log_level.upper())
# set up stdout logging
sh = logging.StreamHandler()
sh.setFormatter(log_formatter)
logger = logging.getLogger()
logger.addHandler(sh)
logging.info("LOG LEVEL: {}".format(logger.level))
# Check that models and datasets are up to date
if not opts.dev:
rc = subprocess.call([opts.verify_hash_script_path, "locobot"])
set_start_method("spawn", force=True)
sa = LocobotAgent(opts)
sa.start()
```
#### File: semantic_parsing/tests/test_nsp_loading.py
```python
import os
import unittest
from droidlet.perception.semantic_parsing.nsp_transformer_model.query_model import NSPBertModel as Model
NLU_MODEL_DIR = os.path.join(
os.path.dirname(__file__), "../../../../agents/craftassist/models/semantic_parser/"
)
NLU_DATA_DIR = os.path.join(
os.path.dirname(__file__), "../../../../agents/craftassist/datasets/annotated_data/"
)
class TestNSPModel(unittest.TestCase):
def setUp(self):
self.nsp_model_dir = os.path.join(NLU_MODEL_DIR, "ttad_bert_updated")
self.model = Model(model_dir=self.nsp_model_dir, data_dir=NLU_DATA_DIR)
def test_model_parse(self):
chat = 'come here'
logical_form = self.model.parse(chat=chat)
self.assertEqual(type(logical_form), dict)
self.assertTrue("dialogue_type" in logical_form)
chat = 'hello'
logical_form = self.model.parse(chat=chat)
self.assertEqual(type(logical_form), dict)
self.assertTrue("dialogue_type" in logical_form)
chat = 'dance'
logical_form = self.model.parse(chat=chat)
self.assertEqual(type(logical_form), dict)
self.assertTrue("dialogue_type" in logical_form)
def test_model_dir(self):
# change the model directory and assert model doesn't load
self.assertRaises(Exception, Model, self.nsp_model_dir + "wert", NLU_DATA_DIR)
if __name__ == '__main__':
unittest.main()
```
#### File: semantic_parsing/tests/test_y_print_parsing_report.py
```python
import os
import unittest
import json
from ..nsp_querier import NSPQuerier
from droidlet.shared_data_structs import MockOpt
from prettytable import PrettyTable
class fontcolors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
# NOTE: The following commands in locobot_commands can't be supported
# right away but we'll attempt them in the next round:
# "push the chair",
# "find the closest red thing",
# "copy this motion",
# "topple the pile of notebooks",
common_functional_commands = {
"turn right": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {"body_turn": {"relative_yaw": {"fixed_value": "-90"}}},
"action_type": "DANCE",
}
],
},
"where are my keys": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
},
},
},
"point at the table": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {
"point": {
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
}
}
},
"action_type": "DANCE",
}
],
},
"dig two tiny holes there": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {"contains_coreference": "yes"},
"action_type": "DIG",
"schematic": {
"filters": {
"where_clause": {
"AND": [
{"pred_text": "has_name", "obj_text": [0, [3, 3]]},
{"pred_text": "has_size", "obj_text": [0, [2, 2]]},
]
},
"selector": {
"return_quantity": "RANDOM",
"ordinal": [0, [1, 1]],
"same": "ALLOWED",
},
}
},
}
],
},
"go there": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [{"location": {"contains_coreference": "yes"}, "action_type": "MOVE"}],
},
"can you climb on top of the cube": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"relative_direction": "UP",
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [7, 7]]}]
}
}
},
},
"action_type": "MOVE",
}
],
},
"go to the circle": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
},
"action_type": "MOVE",
}
],
},
"what is the name of the yellow shape": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "NAME"},
"where_clause": {
"AND": [{"pred_text": "has_colour", "obj_text": [0, [6, 6]]}]
},
},
},
"what can you do": {"dialogue_type": "GET_CAPABILITIES", "action_type": "ANY"},
"what is that blue object": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "NAME"},
"where_clause": {
"AND": [{"pred_text": "has_colour", "obj_text": [0, [3, 3]]}]
},
"contains_coreference": "yes",
},
},
"make two red cubes there": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"schematic": {
"filters": {
"where_clause": {
"AND": [
{"pred_text": "has_name", "obj_text": [0, [3, 3]]},
{"pred_text": "has_colour", "obj_text": [0, [2, 2]]},
]
},
"selector": {
"return_quantity": "RANDOM",
"ordinal": [0, [1, 1]],
"same": "ALLOWED",
},
}
},
"action_type": "BUILD",
"location": {"contains_coreference": "yes"},
}
],
},
"go to the window": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
},
"action_type": "MOVE",
}
],
},
"point to the table": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {
"point": {
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
}
}
},
"action_type": "DANCE",
}
],
},
"look at the table": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {
"look_turn": {
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
}
}
},
"action_type": "DANCE",
}
],
},
"go left": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"relative_direction": "LEFT",
"reference_object": {"special_reference": {"fixed_value": "AGENT"}},
},
"action_type": "MOVE",
}
],
},
"fill that hole up with sand": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"schematic": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_block_type", "obj_text": [0, [5, 5]]}]
}
}
},
"action_type": "FILL",
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [2, 2]]}]
}
},
"contains_coreference": "yes",
},
}
],
},
"what size is the table": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "SIZE"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
},
},
},
"what is outside the window": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "NAME"},
"selector": {
"location": {
"text_span": [0, [2, 4]],
"relative_direction": "OUTSIDE",
"reference_object": {
"text_span": [0, [4, 4]],
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
}
},
},
}
},
},
},
"follow me": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"remove_condition": {"condition_type": "NEVER"},
"location": {
"reference_object": {"special_reference": {"fixed_value": "SPEAKER"}}
},
"action_type": "MOVE",
}
],
},
"make a yellow circle to the left of the square": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"schematic": {
"filters": {
"where_clause": {
"AND": [
{"pred_text": "has_name", "obj_text": [0, [3, 3]]},
{"pred_text": "has_colour", "obj_text": [0, [2, 2]]},
]
}
}
},
"action_type": "BUILD",
"location": {
"relative_direction": "LEFT",
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [9, 9]]}]
}
}
},
},
}
],
},
"how many red things are there": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": "COUNT",
"where_clause": {
"AND": [{"pred_text": "has_colour", "obj_text": [0, [2, 2]]}]
},
},
},
"can you topple the circle": {"dialogue_type": "GET_CAPABILITIES", "action_type": "UNKNOWN"},
"spawn two pigs": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"action_type": "SPAWN",
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [2, 2]]}]
},
"selector": {
"return_quantity": "RANDOM",
"ordinal": [0, [1, 1]],
"same": "DISALLOWED",
},
}
},
}
],
},
"what is to the left of that": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "NAME"},
"selector": {
"location": {
"relative_direction": "LEFT",
"reference_object": {"filters": {"contains_coreference": "yes"}},
}
},
"memory_type": "REFERENCE_OBJECT",
},
},
"what is to the left of the square": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "NAME"},
"selector": {
"location": {
"relative_direction": "LEFT",
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [7, 7]]}]
}
}
},
}
},
"memory_type": "REFERENCE_OBJECT",
},
},
"go to the table": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
},
"action_type": "MOVE",
}
],
},
"have you seen my phone": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
},
},
},
"what size is the square": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "SIZE"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
},
},
},
"destroy that": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"action_type": "DESTROY",
"reference_object": {"filters": {"contains_coreference": "yes"}},
}
],
},
"go forward": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"relative_direction": "FRONT",
"reference_object": {"special_reference": {"fixed_value": "AGENT"}},
},
"action_type": "MOVE",
}
],
},
"look at the circle": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {
"look_turn": {
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
}
}
},
"action_type": "DANCE",
}
],
},
"make a big green square behind me": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"schematic": {
"filters": {
"where_clause": {
"AND": [
{"pred_text": "has_name", "obj_text": [0, [4, 4]]},
{"pred_text": "has_colour", "obj_text": [0, [3, 3]]},
{"pred_text": "has_size", "obj_text": [0, [2, 2]]},
]
}
}
},
"action_type": "BUILD",
"location": {
"relative_direction": "BACK",
"reference_object": {"special_reference": {"fixed_value": "SPEAKER"}},
},
}
],
},
"follow the sheep": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"remove_condition": {"condition_type": "NEVER"},
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [2, 2]]}]
}
}
}
},
"action_type": "MOVE",
}
],
},
"find the pig": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [2, 2]]}]
},
},
},
"can you climb on top of the couch": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"relative_direction": "UP",
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [7, 7]]}]
}
}
},
},
"action_type": "MOVE",
}
],
},
"where am i": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"memory_type": "REFERENCE_OBJECT",
"where_clause": {
"AND": [{"pred_text": "has_tag", "obj_text": {"fixed_value": "SPEAKER"}}]
},
},
},
"how many pencils are there": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": "COUNT",
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [2, 2]]}]
},
},
},
"what color is the chair": {
"dialogue_type": "GET_MEMORY",
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
},
"output": {"attribute": "COLOUR"},
},
},
"come here": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [{"location": {"contains_coreference": "yes"}, "action_type": "MOVE"}],
},
"where is the picture": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
},
},
},
"make two circles there": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {"contains_coreference": "yes"},
"action_type": "BUILD",
"schematic": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [2, 2]]}]
},
"selector": {
"return_quantity": "RANDOM",
"ordinal": [0, [1, 1]],
"same": "ALLOWED",
},
}
},
}
],
},
"show me to the bathroom": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
},
},
},
"point to the jacket": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {
"point": {
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
}
}
},
"action_type": "DANCE",
}
],
},
"point at the cube": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {
"point": {
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
}
}
},
"action_type": "DANCE",
}
],
},
"hi": {"dialogue_type": "NOOP"},
"go back": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"relative_direction": "BACK",
"reference_object": {"special_reference": {"fixed_value": "AGENT"}},
},
"action_type": "MOVE",
}
],
},
"how many cubes are there": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": "COUNT",
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [2, 2]]}]
},
},
},
"is there anything big": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": "MEMORY",
"where_clause": {
"AND": [{"pred_text": "has_size", "obj_text": [0, [3, 3]]}]
},
},
},
"what color is the square": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "COLOUR"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
},
},
},
"show me to the square": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
},
},
},
"have you seen the pig": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [4, 4]]}]
},
},
},
"can you topple the chair": {"dialogue_type": "GET_CAPABILITIES", "action_type": "UNKNOWN"},
"point at the square": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {
"point": {
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
}
}
},
"action_type": "DANCE",
}
],
},
"what is the name of the thing closest to you": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "NAME"},
"selector": {
"ordinal": {"fixed_value": "FIRST"},
"return_quantity": {
"argval": {
"polarity": "MIN",
"quantity": {
"attribute": {
"linear_extent": {
"source": {
"reference_object": {
"special_reference": {"fixed_value": "AGENT"}
}
}
}
}
},
}
}
},
},
},
"is there anything small": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": "MEMORY",
"where_clause": {
"AND": [{"pred_text": "has_size", "obj_text": [0, [3, 3]]}]
},
},
},
"look at the hole": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {
"look_turn": {
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
}
}
}
}
}
},
"action_type": "DANCE",
}
],
},
"how many yellow things do you see": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": "COUNT",
"where_clause": {
"AND": [{"pred_text": "has_colour", "obj_text": [0, [2, 2]]}]
},
},
},
"is there anything red": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": "MEMORY",
"where_clause": {
"AND": [{"pred_text": "has_colour", "obj_text": [0, [3, 3]]}]
},
},
},
"where is the circle": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [3, 3]]}]
},
},
},
"turn left": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"dance_type": {"body_turn": {"relative_yaw": {"fixed_value": "90"}}},
"action_type": "DANCE",
}
],
},
"where are you": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_tag", "obj_text": {"fixed_value": "SELF"}}]
},
"memory_type": "REFERENCE_OBJECT",
},
},
"what is the name of the object to my left": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "NAME"},
"selector": {
"location": {
"relative_direction": "LEFT",
"reference_object": {"special_reference": {"fixed_value": "SPEAKER"}},
}
},
},
},
"what is the name of the thing closest to me": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "NAME"},
"selector": {
"ordinal": {"fixed_value": "FIRST"},
"return_quantity": {
"argval": {
"polarity": "MIN",
"quantity": {
"attribute": {
"linear_extent": {
"source": {
"reference_object": {
"special_reference": {"fixed_value": "SPEAKER"}
}
}
}
}
},
}
}
},
},
},
"go right": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"relative_direction": "RIGHT",
"reference_object": {"special_reference": {"fixed_value": "AGENT"}},
},
"action_type": "MOVE",
}
],
},
"find the hoodie": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": [0, [2, 2]]}]
},
},
},
}
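# Hand-annotated parses for robot-specific commands. They are merged into
# common_functional_commands below (so they are evaluated like the rest) and into the
# ground-truth lookup in setUp(), where they take precedence over the parsing model.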
GROUND_TRUTH_PARSES = {
"go to the gray chair": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [
{"pred_text": "has_colour", "obj_text": "gray"},
{"pred_text": "has_name", "obj_text": "chair"},
]
}
}
}
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go to the chair": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": "chair"}]
}
}
}
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go forward 0.2 meters": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {"special_reference": "AGENT"},
"relative_direction": "FRONT",
"steps": "0.2",
"has_measure": "meters",
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go forward one meter": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {"special_reference": "AGENT"},
"relative_direction": "FRONT",
"steps": "one",
"has_measure": "meter",
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go left 3 feet": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {"special_reference": "AGENT"},
"relative_direction": "LEFT",
"steps": "3",
"has_measure": "feet",
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go right 3 feet": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {"special_reference": "AGENT"},
"relative_direction": "RIGHT",
"steps": "3",
"has_measure": "feet",
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go left 3 meters": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {"special_reference": "AGENT"},
"relative_direction": "LEFT",
"steps": "3",
"has_measure": "meters",
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go forward 1 feet": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {"special_reference": "AGENT"},
"relative_direction": "FRONT",
"steps": "1",
"has_measure": "feet",
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go back 1 feet": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {"special_reference": "AGENT"},
"relative_direction": "BACK",
"steps": "1",
"has_measure": "feet",
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"turn right 90 degrees": {
"action_sequence": [
{"action_type": "DANCE", "dance_type": {"body_turn": {"relative_yaw": "-90"}}}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"turn left 90 degrees": {
"action_sequence": [
{"action_type": "DANCE", "dance_type": {"body_turn": {"relative_yaw": "90"}}}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"turn right 180 degrees": {
"action_sequence": [
{"action_type": "DANCE", "dance_type": {"body_turn": {"relative_yaw": "-180"}}}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"turn right": {
"action_sequence": [
{"action_type": "DANCE", "dance_type": {"body_turn": {"relative_yaw": "-90"}}}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"look at where I am pointing": {
"action_sequence": [
{
"action_type": "DANCE",
"dance_type": {
"look_turn": {
"location": {"reference_object": {"special_reference": "SPEAKER_LOOK"}}
}
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"wave": {
"action_sequence": [
{
"action_type": "DANCE",
"dance_type": {
"filters": {
"where_clause": {
"AND" : [{"pred_text": "has_name", "obj_text": "wave"}]
}
}
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"follow the chair": {
"action_sequence": [
{
"action_type": "MOVE",
"location": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": "chair"}]
}
}
}
},
"remove_condition": {"condition_type": "NEVER"},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"find Laurens": {
"action_sequence": [
{
"action_type": "SCOUT",
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": "Laurens"}]
}
}
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"bring the cup to Mary": {
"action_sequence": [
{
"action_type": "GET",
"receiver": {
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": "Mary"}]
}
}
}
},
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": "cup"}]
}
}
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
"go get me lunch": {
"action_sequence": [
{
"action_type": "GET",
"receiver": {"reference_object": {"special_reference": "SPEAKER"}},
"reference_object": {
"filters": {
"where_clause": {
"AND": [{"pred_text": "has_name", "obj_text": "lunch"}]
}
}
},
}
],
"dialogue_type": "HUMAN_GIVE_COMMAND",
},
}
common_functional_commands.update(GROUND_TRUTH_PARSES)
TTAD_MODEL_DIR = os.path.join(
os.path.dirname(__file__), "../../../../agents/craftassist/models/semantic_parser/"
)
TTAD_BERT_DATA_DIR = os.path.join(
os.path.dirname(__file__), "../../../../agents/craftassist/datasets/annotated_data/"
)
GROUND_TRUTH_DATA_DIR = os.path.join(
os.path.dirname(__file__), "../../../../agents/craftassist/datasets/ground_truth/"
)
def remove_key_text_span(dictionary):
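    # Recursively drop "text_span" entries from nested dicts so that model predictions
    # (which include spans) can be compared against ground-truth parses that omit them.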
copy_d = {}
for key, value in dictionary.items():
if type(value) == dict and "text_span" in value:
value.pop("text_span")
copy_d[key] = value
else:
copy_d[key] = value
if type(value) == dict:
copy_d[key] = remove_key_text_span(value)
return copy_d
def remove_text_span(dictionary):
updated_d = {}
if dictionary["dialogue_type"] == "HUMAN_GIVE_COMMAND":
updated_d["action_sequence"] = []
for action_dict in dictionary["action_sequence"]:
updated_action_dict = remove_key_text_span(action_dict)
updated_d["action_sequence"].append(updated_action_dict)
updated_d["dialogue_type"] = "HUMAN_GIVE_COMMAND"
else:
updated_d = remove_key_text_span(dictionary)
return updated_d
def compare_dicts(dict1, dict2):
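    # Subset comparison: True when every key in dict1 appears in dict2 with a matching
    # value - strings must be equal, each list element must occur in dict2's list
    # (order-insensitive, used for triples), and nested dicts are compared recursively.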
for k, v in dict1.items():
if k not in dict2:
return False
if type(v) == str and dict2[k] != v:
return False
if type(v) == list:
if type(dict2[k]) != list:
return False
for val in v:
# for triples
if not (val in dict2[k]):
return False
if type(v) == dict:
if type(dict2[k]) != dict:
return False
if not compare_dicts(v, dict2[k]):
return False
return True
def compare_full_dictionaries(d1, d2):
if d1["dialogue_type"] == "HUMAN_GIVE_COMMAND":
if d2["dialogue_type"] != d1["dialogue_type"]:
return False
actions = d1["action_sequence"]
if len(actions) != len(d2["action_sequence"]):
return False
for i, action_dict in enumerate(actions):
if not compare_dicts(action_dict, d2["action_sequence"][i]):
return False
return True
else:
return compare_dicts(d1, d2)
class TestDialogueManager(unittest.TestCase):
    def setUp(self):
opts = MockOpt()
opts.nsp_data_dir = TTAD_BERT_DATA_DIR
opts.ground_truth_data_dir = GROUND_TRUTH_DATA_DIR
opts.nsp_models_dir = TTAD_MODEL_DIR
opts.no_ground_truth = False
self.chat_parser = NSPQuerier(opts=opts)
self.ground_truth_actions = {}
print("fetching data from ground truth, from directory: %r" % (opts.ground_truth_data_dir))
if not opts.no_ground_truth:
if os.path.isdir(opts.ground_truth_data_dir):
dataset = opts.ground_truth_data_dir + "datasets/high_pri_commands.txt"
with open(dataset) as f:
for line in f.readlines():
text, logical_form = line.strip().split("|")
clean_text = text.strip('"').lower()
self.ground_truth_actions[clean_text] = json.loads(logical_form)
self.ground_truth_actions.update(GROUND_TRUTH_PARSES)
def test_parses(self):
table = PrettyTable(["Command", "Overall parsing status", "Parsing model status"])
records = []
parsing_model_status = False
pass_cnt, fail_cnt, model_pass_cnt, model_fail_cnt = 0, 0, 0, 0
for command in common_functional_commands.keys():
ground_truth_parse = common_functional_commands[command]
if command in self.ground_truth_actions:
model_prediction = self.ground_truth_actions[command]
else:
# else query the model and remove the value for key "text_span"
model_prediction = remove_text_span(
self.chat_parser.parsing_model.query_for_logical_form(chat=command)
)
# compute parsing pipeline accuracy
status = compare_full_dictionaries(ground_truth_parse, model_prediction)
if status:
pass_cnt += 1
record = [
fontcolors.OKGREEN + command + fontcolors.ENDC,
fontcolors.OKGREEN + "PASS" + fontcolors.ENDC,
]
else:
fail_cnt += 1
record = [
fontcolors.FAIL + command + fontcolors.ENDC,
fontcolors.FAIL + "FAIL" + fontcolors.ENDC,
]
# compute model correctness status
model_output = remove_text_span(
self.chat_parser.parsing_model.query_for_logical_form(chat=command)
)
parsing_model_status = compare_full_dictionaries(ground_truth_parse, model_output)
if parsing_model_status:
model_pass_cnt += 1
record += [fontcolors.OKGREEN + "PASS" + fontcolors.ENDC]
else:
model_fail_cnt += 1
record += [fontcolors.FAIL + "FAIL" + fontcolors.ENDC]
records.append(record)
for record in records:
table.add_row(record)
print(table)
accuracy = round((pass_cnt / (pass_cnt + fail_cnt)) * 100.0, 2)
model_accuracy = round((model_pass_cnt / (model_pass_cnt + model_fail_cnt)) * 100.0, 2)
print_str = (
fontcolors.OKGREEN
+ "Pass: {} "
+ fontcolors.ENDC
+ fontcolors.FAIL
+ "Fail: {} "
+ fontcolors.ENDC
+ fontcolors.OKCYAN
+ "Parsing pipeline accuracy: {}%"
+ fontcolors.ENDC
)
print_model_str = (
fontcolors.OKGREEN
+ "Pass: {} "
+ fontcolors.ENDC
+ fontcolors.FAIL
+ "Fail: {} "
+ fontcolors.ENDC
+ fontcolors.OKCYAN
+ "Parsing model accuracy: {}%"
+ fontcolors.ENDC
)
print(print_str.format(pass_cnt, fail_cnt, accuracy))
print("Printing Model accuracy status ... ")
print(print_model_str.format(model_pass_cnt, model_fail_cnt, model_accuracy))
# check that parsing pipeline is at a 100% accuracy
self.assertTrue(accuracy == 100.0)
if __name__ == "__main__":
unittest.main()
```
#### File: tests/scripts/5_continuous_grasping_hw.py
```python
import time
import sys
import numpy as np
import torch
import torchcontrol as toco
from torchcontrol.transform import Rotation as R
from torchcontrol.transform import Transformation as T
from polymetis import RobotInterface, GripperInterface
DEFAULT_MAX_ITERS = 3
# Sampling params
GP_RANGE_UPPER = [0.7, 0.1, np.pi / 2]
GP_RANGE_LOWER = [0.4, -0.1, -np.pi / 2]
# Grasp params
REST_POSE = ([0.5, 0.0, 0.7], [1.0, 0.0, 0.0, 0.0])
PREGRASP_HEIGHT = 0.55
GRASP_HEIGHT = 0.25
PLANNER_DT = 0.02
class ManipulatorSystem:
def __init__(self):
self.arm = RobotInterface()
self.gripper = GripperInterface()
time.sleep(0.5)
# Set continuous control policy
self.reset_policy()
# Reset to rest pose
self.rest_pos = torch.Tensor(REST_POSE[0])
self.rest_quat = torch.Tensor(REST_POSE[1])
self.reset()
def __del__(self):
self.arm.terminate_current_policy()
def reset(self, time_to_go=2.0):
self.move_to(self.rest_pos, self.rest_quat, time_to_go)
self.open_gripper()
def reset_policy(self):
# Go home
self.arm.go_home()
# Send PD controller
joint_pos_current = self.arm.get_joint_angles()
policy = toco.policies.JointImpedanceControl(
joint_pos_current=joint_pos_current,
Kp=self.arm.metadata.default_Kq,
Kd=self.arm.metadata.default_Kqd,
robot_model=self.arm.robot_model,
)
self.arm.send_torch_policy(policy, blocking=False)
def move_to(self, pos, quat, time_to_go=2.0):
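        # Plan a Cartesian-space min-jerk trajectory to the target end-effector pose and
        # stream the resulting joint position targets to the running joint impedance
        # policy once every PLANNER_DT seconds.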
# Plan trajectory
joint_pos_current = self.arm.get_joint_angles()
N = int(time_to_go / PLANNER_DT)
plan = toco.modules.CartesianSpaceMinJerkJointPlanner(
joint_pos_start=joint_pos_current,
ee_pose_goal=T.from_rot_xyz(R.from_quat(quat), pos),
steps=N,
time_to_go=time_to_go,
robot_model=self.arm.robot_model,
)
# Execute trajectory
t0 = time.time()
t_target = t0
for i in range(N):
# Update traj
joint_pos_desired, _, _ = plan(i)
self.arm.update_current_policy({"joint_pos_desired": joint_pos_desired})
# Spin once
t_target += PLANNER_DT
t_remaining = t_target - time.time()
time.sleep(max(t_remaining, 0.0))
# Wait for robot to stabilize
time.sleep(0.2)
def close_gripper(self):
self.gripper.grasp(speed=0.1, force=1.0)
time.sleep(0.5)
def open_gripper(self):
max_width = self.gripper.get_state().max_width
self.gripper.goto(width=max_width, speed=0.1, force=1.0)
time.sleep(0.5)
def grasp_pose_to_pos_quat(self, grasp_pose, z):
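        # A grasp pose is (x, y, yaw) in the table plane; build the full target by pinning
        # the height to z and composing the yaw rotation with the rest orientation.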
x, y, rz = grasp_pose
pos = torch.Tensor([x, y, z])
quat = (
R.from_rotvec(torch.Tensor([0, 0, rz])) * R.from_quat(self.rest_quat)
).as_quat()
return pos, quat
def grasp(self, grasp_pose0, grasp_pose1):
# Move to pregrasp
pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, PREGRASP_HEIGHT)
self.move_to(pos, quat)
# Lower (slower than other motions to prevent sudden collisions)
pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, GRASP_HEIGHT)
self.move_to(pos, quat, time_to_go=4.0)
# Grasp
self.close_gripper()
# Lift to pregrasp
pos, quat = self.grasp_pose_to_pos_quat(grasp_pose0, PREGRASP_HEIGHT)
self.move_to(pos, quat)
# Move to new pregrasp
pos, quat = self.grasp_pose_to_pos_quat(grasp_pose1, PREGRASP_HEIGHT)
self.move_to(pos, quat)
# Release
self.open_gripper()
# Check if policy terminated due to issues
if self.arm.get_previous_interval().end != -1:
print("Interrupt detected. Reinstantiating control policy...")
time.sleep(3)
self.reset_policy()
# Reset
self.reset()
def uniform_sample(lower, upper):
return lower + (upper - lower) * torch.rand_like(lower)
def main(argv):
if len(argv) > 1:
try:
max_iters = int(argv[1])
        except ValueError:
            print("Usage: python 5_continuous_grasping_hw.py <max_iterations>")
return
else:
max_iters = DEFAULT_MAX_ITERS
# Initialize interfaces
robot = ManipulatorSystem()
# Setup sampling
gp_range_upper = torch.Tensor(GP_RANGE_UPPER)
gp_range_lower = torch.Tensor(GP_RANGE_LOWER)
# Perform grasping
i = 0
try:
while True:
# Sample grasp
grasp_pose0 = uniform_sample(gp_range_lower, gp_range_upper)
grasp_pose1 = uniform_sample(gp_range_lower, gp_range_upper)
# Perform grasp
print(f"Grasp {i + 1}: grasp={grasp_pose0}, release={grasp_pose1}")
robot.grasp(grasp_pose0, grasp_pose1)
# Loop termination
i += 1
if max_iters > 0 and i >= max_iters:
break
except KeyboardInterrupt:
print("Interrupted by user.")
if __name__ == "__main__":
main(sys.argv)
``` |
{
"source": "1heart/graspnet-baseline",
"score": 2
} |
#### File: graspnet_baseline/dataset/graspnet_dataset.py
```python
import os
import sys
import numpy as np
import scipy.io as scio
from PIL import Image
import torch
from collections import abc as container_abcs
from torch.utils.data import Dataset
from tqdm import tqdm
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from data_utils import CameraInfo, transform_point_cloud, create_point_cloud_from_depth_image,\
get_workspace_mask, remove_invisible_grasp_points
class GraspNetDataset(Dataset):
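    # Loads GraspNet-1Billion scenes: RGB-D frames are back-projected into point clouds,
    # optionally restricted to the workspace and augmented, and (when load_label is set)
    # paired with per-object grasp labels whose scores and tolerances are zeroed out for
    # colliding grasps.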
def __init__(self, root, valid_obj_idxs, grasp_labels, camera='kinect', split='train', num_points=20000,
remove_outlier=False, remove_invisible=True, augment=False, load_label=True):
assert(num_points<=50000)
self.root = root
self.split = split
self.num_points = num_points
self.remove_outlier = remove_outlier
self.remove_invisible = remove_invisible
self.valid_obj_idxs = valid_obj_idxs
self.grasp_labels = grasp_labels
self.camera = camera
self.augment = augment
self.load_label = load_label
self.collision_labels = {}
if split == 'train':
self.sceneIds = list( range(100) )
elif split == 'test':
self.sceneIds = list( range(100,190) )
elif split == 'test_seen':
self.sceneIds = list( range(100,130) )
elif split == 'test_similar':
self.sceneIds = list( range(130,160) )
elif split == 'test_novel':
self.sceneIds = list( range(160,190) )
self.sceneIds = ['scene_{}'.format(str(x).zfill(4)) for x in self.sceneIds]
self.colorpath = []
self.depthpath = []
self.labelpath = []
self.metapath = []
self.scenename = []
self.frameid = []
for x in tqdm(self.sceneIds, desc = 'Loading data path and collision labels...'):
for img_num in range(256):
self.colorpath.append(os.path.join(root, 'scenes', x, camera, 'rgb', str(img_num).zfill(4)+'.png'))
self.depthpath.append(os.path.join(root, 'scenes', x, camera, 'depth', str(img_num).zfill(4)+'.png'))
self.labelpath.append(os.path.join(root, 'scenes', x, camera, 'label', str(img_num).zfill(4)+'.png'))
self.metapath.append(os.path.join(root, 'scenes', x, camera, 'meta', str(img_num).zfill(4)+'.mat'))
self.scenename.append(x.strip())
self.frameid.append(img_num)
if self.load_label:
collision_labels = np.load(os.path.join(root, 'collision_label', x.strip(), 'collision_labels.npz'))
self.collision_labels[x.strip()] = {}
for i in range(len(collision_labels)):
self.collision_labels[x.strip()][i] = collision_labels['arr_{}'.format(i)]
def scene_list(self):
return self.scenename
def __len__(self):
return len(self.depthpath)
def augment_data(self, point_clouds, object_poses_list):
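        # With probability 0.5 mirror the cloud across the YZ plane, then apply a random
        # rotation; the same transforms are applied to each object pose so the grasp
        # labels stay consistent with the augmented cloud.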
# Flipping along the YZ plane
if np.random.random() > 0.5:
flip_mat = np.array([[-1, 0, 0],
[ 0, 1, 0],
[ 0, 0, 1]])
point_clouds = transform_point_cloud(point_clouds, flip_mat, '3x3')
for i in range(len(object_poses_list)):
object_poses_list[i] = np.dot(flip_mat, object_poses_list[i]).astype(np.float32)
        # Random rotation about the X-axis (note: not the Z-axis)
rot_angle = (np.random.random()*np.pi/3) - np.pi/6 # -30 ~ +30 degree
c, s = np.cos(rot_angle), np.sin(rot_angle)
rot_mat = np.array([[1, 0, 0],
[0, c,-s],
[0, s, c]])
point_clouds = transform_point_cloud(point_clouds, rot_mat, '3x3')
for i in range(len(object_poses_list)):
object_poses_list[i] = np.dot(rot_mat, object_poses_list[i]).astype(np.float32)
return point_clouds, object_poses_list
def __getitem__(self, index):
if self.load_label:
return self.get_data_label(index)
else:
return self.get_data(index)
def get_data(self, index, return_raw_cloud=False):
color = np.array(Image.open(self.colorpath[index]), dtype=np.float32) / 255.0
depth = np.array(Image.open(self.depthpath[index]))
seg = np.array(Image.open(self.labelpath[index]))
meta = scio.loadmat(self.metapath[index])
scene = self.scenename[index]
try:
intrinsic = meta['intrinsic_matrix']
factor_depth = meta['factor_depth']
        except Exception as e:
            print(repr(e))
            print(scene)
            raise  # re-raise: 'intrinsic' and 'factor_depth' are required below
camera = CameraInfo(1280.0, 720.0, intrinsic[0][0], intrinsic[1][1], intrinsic[0][2], intrinsic[1][2], factor_depth)
# generate cloud
cloud = create_point_cloud_from_depth_image(depth, camera, organized=True)
# get valid points
depth_mask = (depth > 0)
seg_mask = (seg > 0)
if self.remove_outlier:
camera_poses = np.load(os.path.join(self.root, 'scenes', scene, self.camera, 'camera_poses.npy'))
align_mat = np.load(os.path.join(self.root, 'scenes', scene, self.camera, 'cam0_wrt_table.npy'))
trans = np.dot(align_mat, camera_poses[self.frameid[index]])
workspace_mask = get_workspace_mask(cloud, seg, trans=trans, organized=True, outlier=0.02)
mask = (depth_mask & workspace_mask)
else:
mask = depth_mask
cloud_masked = cloud[mask]
color_masked = color[mask]
seg_masked = seg[mask]
if return_raw_cloud:
return cloud_masked, color_masked
# sample points
if len(cloud_masked) >= self.num_points:
idxs = np.random.choice(len(cloud_masked), self.num_points, replace=False)
else:
idxs1 = np.arange(len(cloud_masked))
idxs2 = np.random.choice(len(cloud_masked), self.num_points-len(cloud_masked), replace=True)
idxs = np.concatenate([idxs1, idxs2], axis=0)
cloud_sampled = cloud_masked[idxs]
color_sampled = color_masked[idxs]
ret_dict = {}
ret_dict['point_clouds'] = cloud_sampled.astype(np.float32)
ret_dict['cloud_colors'] = color_sampled.astype(np.float32)
return ret_dict
def get_data_label(self, index):
color = np.array(Image.open(self.colorpath[index]), dtype=np.float32) / 255.0
depth = np.array(Image.open(self.depthpath[index]))
seg = np.array(Image.open(self.labelpath[index]))
meta = scio.loadmat(self.metapath[index])
scene = self.scenename[index]
try:
obj_idxs = meta['cls_indexes'].flatten().astype(np.int32)
poses = meta['poses']
intrinsic = meta['intrinsic_matrix']
factor_depth = meta['factor_depth']
        except Exception as e:
            print(repr(e))
            print(scene)
            raise  # re-raise: the metadata fields are required below
camera = CameraInfo(1280.0, 720.0, intrinsic[0][0], intrinsic[1][1], intrinsic[0][2], intrinsic[1][2], factor_depth)
# generate cloud
cloud = create_point_cloud_from_depth_image(depth, camera, organized=True)
# get valid points
depth_mask = (depth > 0)
seg_mask = (seg > 0)
if self.remove_outlier:
camera_poses = np.load(os.path.join(self.root, 'scenes', scene, self.camera, 'camera_poses.npy'))
align_mat = np.load(os.path.join(self.root, 'scenes', scene, self.camera, 'cam0_wrt_table.npy'))
trans = np.dot(align_mat, camera_poses[self.frameid[index]])
workspace_mask = get_workspace_mask(cloud, seg, trans=trans, organized=True, outlier=0.02)
mask = (depth_mask & workspace_mask)
else:
mask = depth_mask
cloud_masked = cloud[mask]
color_masked = color[mask]
seg_masked = seg[mask]
# sample points
if len(cloud_masked) >= self.num_points:
idxs = np.random.choice(len(cloud_masked), self.num_points, replace=False)
else:
idxs1 = np.arange(len(cloud_masked))
idxs2 = np.random.choice(len(cloud_masked), self.num_points-len(cloud_masked), replace=True)
idxs = np.concatenate([idxs1, idxs2], axis=0)
cloud_sampled = cloud_masked[idxs]
color_sampled = color_masked[idxs]
seg_sampled = seg_masked[idxs]
objectness_label = seg_sampled.copy()
objectness_label[objectness_label>1] = 1
object_poses_list = []
grasp_points_list = []
grasp_offsets_list = []
grasp_scores_list = []
grasp_tolerance_list = []
for i, obj_idx in enumerate(obj_idxs):
if obj_idx not in self.valid_obj_idxs:
continue
if (seg_sampled == obj_idx).sum() < 50:
continue
object_poses_list.append(poses[:, :, i])
points, offsets, scores, tolerance = self.grasp_labels[obj_idx]
collision = self.collision_labels[scene][i] #(Np, V, A, D)
# remove invisible grasp points
if self.remove_invisible:
visible_mask = remove_invisible_grasp_points(cloud_sampled[seg_sampled==obj_idx], points, poses[:,:,i], th=0.01)
points = points[visible_mask]
offsets = offsets[visible_mask]
scores = scores[visible_mask]
tolerance = tolerance[visible_mask]
collision = collision[visible_mask]
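            # Subsample grasp points to keep the labels manageable: roughly a quarter of
            # the visible points, but at least 300 whenever that many are available.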
idxs = np.random.choice(len(points), min(max(int(len(points)/4),300),len(points)), replace=False)
grasp_points_list.append(points[idxs])
grasp_offsets_list.append(offsets[idxs])
collision = collision[idxs].copy()
scores = scores[idxs].copy()
scores[collision] = 0
grasp_scores_list.append(scores)
tolerance = tolerance[idxs].copy()
tolerance[collision] = 0
grasp_tolerance_list.append(tolerance)
if self.augment:
cloud_sampled, object_poses_list = self.augment_data(cloud_sampled, object_poses_list)
ret_dict = {}
ret_dict['point_clouds'] = cloud_sampled.astype(np.float32)
ret_dict['cloud_colors'] = color_sampled.astype(np.float32)
ret_dict['objectness_label'] = objectness_label.astype(np.int64)
ret_dict['object_poses_list'] = object_poses_list
ret_dict['grasp_points_list'] = grasp_points_list
ret_dict['grasp_offsets_list'] = grasp_offsets_list
ret_dict['grasp_labels_list'] = grasp_scores_list
ret_dict['grasp_tolerance_list'] = grasp_tolerance_list
return ret_dict
def load_grasp_labels(root):
obj_names = list(range(88))
valid_obj_idxs = []
grasp_labels = {}
for i, obj_name in enumerate(tqdm(obj_names, desc='Loading grasping labels...')):
if i == 18: continue
valid_obj_idxs.append(i + 1) #here align with label png
label = np.load(os.path.join(root, 'grasp_label', '{}_labels.npz'.format(str(i).zfill(3))))
tolerance = np.load(os.path.join(BASE_DIR, 'tolerance', '{}_tolerance.npy'.format(str(i).zfill(3))))
grasp_labels[i + 1] = (label['points'].astype(np.float32), label['offsets'].astype(np.float32),
label['scores'].astype(np.float32), tolerance)
return valid_obj_idxs, grasp_labels
def collate_fn(batch):
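    # Custom collate: stack numpy arrays into a single tensor, recurse into dicts, and
    # keep per-sample lists (a variable number of objects per scene) as lists of tensors
    # rather than trying to stack them.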
if type(batch[0]).__module__ == 'numpy':
return torch.stack([torch.from_numpy(b) for b in batch], 0)
elif isinstance(batch[0], container_abcs.Mapping):
return {key:collate_fn([d[key] for d in batch]) for key in batch[0]}
elif isinstance(batch[0], container_abcs.Sequence):
return [[torch.from_numpy(sample) for sample in b] for b in batch]
raise TypeError("batch must contain tensors, dicts or lists; found {}".format(type(batch[0])))
if __name__ == "__main__":
root = '/data/Benchmark/graspnet'
valid_obj_idxs, grasp_labels = load_grasp_labels(root)
train_dataset = GraspNetDataset(root, valid_obj_idxs, grasp_labels, split='train', remove_outlier=True, remove_invisible=True, num_points=20000)
print(len(train_dataset))
end_points = train_dataset[233]
cloud = end_points['point_clouds']
seg = end_points['objectness_label']
print(cloud.shape)
print(cloud.dtype)
print(cloud[:,0].min(), cloud[:,0].max())
print(cloud[:,1].min(), cloud[:,1].max())
print(cloud[:,2].min(), cloud[:,2].max())
print(seg.shape)
print((seg>0).sum())
print(seg.dtype)
print(np.unique(seg))
``` |
{
"source": "1heart/tactile-in-hand",
"score": 2
} |
#### File: digit_inhand/src/InhandpyDatasetCSVWriter.py
```python
import rospy
import rospkg
import numpy as np
import pandas as pd
import io
import os
import tf
import json
import cv2
import imageio
from PIL import Image as PILImage  # used to decode compressed image payloads in rosimg_to_numpy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import PointCloud
from sensor_msgs.msg import Image
from geometry_msgs.msg import Point32
from jsk_rviz_plugins.msg import OverlayText
BASE_PATH = ''
class InhandpyDatasetCSVWriter:
def __init__(self):
self.obj_shape = rospy.get_param("obj_shape")
# ros vars
self.tf_listener = tf.TransformListener()
self.bridge = CvBridge()
self.digit_image_sub = rospy.Subscriber(
"/digit/digit_alpha/image_raw/", Image, self.callback_digit_image, queue_size=1)
# overhead image editor
self.bridge = CvBridge()
self.overhead_image_sub = rospy.Subscriber(
"/rgb/image_raw", Image, self.callback_overhead_image, queue_size=1)
self.overhead_image_crop_pub = rospy.Publisher("/rgb/image_crop", Image, queue_size=1)
# dataset vars
self.dstdir_dataset = rospy.get_param("dstdir_dataset")
self.bag_name = rospy.get_param("bag_name")
rospy.loginfo("[InhandpyDatasetCSVWriter] Extracting bag {0}.bag".format(self.bag_name))
self.data_list = []
self.data_csvname = "poses_imgs"
os.popen("mkdir -p {0}/{1}/{2:04d}/color".format(self.dstdir_dataset, self.bag_name, 0), 'r')
# contact thresholds
self.contact_thresh = 0.1
if (self.obj_shape == 'sphere'): self.contact_thresh = 0.1
if (self.obj_shape == 'cube'): self.contact_thresh = 0.075
# contact episode vars
self.contact_episode_idx = 0
self.counter = 0
self.num_incontact = 0
self.min_num_incontact = 5
self.world_T_obj = np.array([[1., 0., 0., 0.], [0., 1., 0., 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]])
if (self.obj_shape == 'sphere'): self.world_T_obj = np.array([[1., 0., 0., 0.07], [0., 1., 0., -0.0675], [0., 0., 1., 0.09], [0., 0., 0., 1.]])
if (self.obj_shape == 'cube'): self.world_T_obj = np.array([[1., 0., 0., -0.15], [0., 1., 0., -0.1475], [0., 0., 1., 0.02], [0., 0., 0., 1.]])
# contact flag publisher
self.contact_flag_pub = rospy.Publisher("/digit/contact/flag", OverlayText, queue_size=1)
# cloud publisher: accumulate contact points over timesteps
self.cloud_pub = rospy.Publisher("/digit/center/cloud", PointCloud, queue_size=1)
self.cloud_msg = PointCloud()
# read mean, std images from file for the particular bag dataset
rospack = rospkg.RosPack()
self.path_pkg = rospack.get_path('digit_inhand')
filename = "{0}/local/resources/digit/{1}/mean_std_img.json".format(self.path_pkg, self.bag_name)
with open(filename) as f:
data = json.load(f)
self.mean_img = np.asarray(data['mean_img'], dtype=np.float32)
self.std_img = np.asarray(data['std_img'], dtype=np.float32)
def in_contact(self, img):
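        # Contact heuristic: count the pixels whose deviation from the per-pixel mean
        # image exceeds 4 standard deviations and declare contact when that fraction is
        # above the shape-specific threshold (the "diff" value below is computed but not
        # used in the decision).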
# Compute per-image sum of stddev squared
diff = np.linalg.norm((img - self.mean_img)/self.std_img)**2
diff = diff / self.mean_img.size
# Count the percent of pixels that are significantly different from their mean values
diff_cnt = np.sum(((img - self.mean_img)/self.std_img)**2 > 4**2)
diff_cnt = float(diff_cnt) / float(self.mean_img.size)
# rospy.loginfo("[InhandpyDatasetCSVWriter::callback_digit_image] diff_cnt: {}".format(diff_cnt))
contact_flag = diff_cnt > self.contact_thresh
# rospy.loginfo("diff_cnt: {0}, contact_flag: {1}\n".format(diff_cnt, contact_flag))
contact_flag_msg = OverlayText()
        contact_flag_msg.text = "IN CONTACT" if contact_flag else ""
self.contact_flag_pub.publish(contact_flag_msg)
rospy.loginfo("[InhandpyDatasetCSVWriter::in_contact] diff_cnt: {0}, contact_flag {1}".format(diff_cnt, contact_flag))
return contact_flag
def rosimg_to_numpy(self, imgmsg):
if hasattr(imgmsg, 'format') and 'compressed' in imgmsg.format:
            img = np.asarray(PILImage.open(io.BytesIO(imgmsg.data)))
return img
return np.frombuffer(imgmsg.data, dtype=np.uint8).reshape(imgmsg.height, imgmsg.width, 3)[:, :, ::-1]
def interpolate_img(self, img, rows, cols):
img = cv2.resize(img, dsize=(cols, rows),interpolation=cv2.INTER_AREA)
return img
def save_episode_step(self, eps_idx, step_idx, img_color, obj_pos, obj_ori, digit_pos, digit_ori):
img_color_loc = "{0:04d}/color/{1:04d}.png".format(eps_idx, step_idx)
# reshape img to match tacto: (240,320,3) -> (120,160,3) -> (160,120,3)
img_color = self.interpolate_img(img=img_color, rows=120, cols=160)
img_color = np.transpose(img_color, (1,0,2))
rospy.loginfo("[save_episode_step::in_contact] img_color_loc: {0}/{1}/{2}".format(
self.dstdir_dataset, self.bag_name, img_color_loc))
imageio.imwrite("{0}/{1}/{2}".format(self.dstdir_dataset,
self.bag_name, img_color_loc), img_color)
img_normal_loc = "{0:04d}/normal/{1:04d}.png".format(eps_idx, step_idx)
data_row = {'obj_pos': obj_pos.tolist(),
'obj_ori': obj_ori.tolist(),
'digit_pos': digit_pos.tolist(),
'digit_ori': digit_ori.tolist(),
'img_color_loc': img_color_loc,
'img_normal_loc': img_normal_loc
}
self.data_list.append(data_row)
def save_episode_dataset(self, eps_idx):
csvfile = "{0}/{1}/{2:04d}/{3}.csv".format(self.dstdir_dataset, self.bag_name, eps_idx, self.data_csvname)
self.data_frame = pd.DataFrame(self.data_list)
self.data_frame.to_csv(csvfile)
rospy.loginfo("Saving episode {0} to {1}".format(eps_idx, csvfile))
# reset vars for a new episode
self.data_list = []
os.popen("mkdir -p {0}/{1}/{2:04d}/color".format(self.dstdir_dataset, self.bag_name, eps_idx+1), 'r')
def callback_overhead_image(self, msg):
try:
img = self.bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')
# img = self.rosimg_to_numpy(msg)
except CvBridgeError as e:
rospy.logwarn(
"[InhandpyDatasetCSVWriter::callback_digit_image] {0}".format(e))
return
img_crop = img[100:500, 450:1000, :] # 720 x 1280 x 4
img_msg = self.bridge.cv2_to_imgmsg(img_crop, encoding='passthrough')
self.overhead_image_crop_pub.publish(img_msg)
def callback_digit_image(self, msg):
try:
img = self.rosimg_to_numpy(msg)
except CvBridgeError as e:
rospy.logwarn(
"[InhandpyDatasetCSVWriter::callback_digit_image] {0}".format(e))
return
try:
# looks up arg2 frame transform in arg1 frame
# (obj_pos, obj_ori) = self.tf_listener.lookupTransform(
# "world", "/object/center/", rospy.Time(0))
(tf_digit_pos, tf_digit_ori) = self.tf_listener.lookupTransform(
"world", "/digit/center/", rospy.Time(0)) # returns list
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logwarn(
"[InhandpyDatasetCSVWriter::callback_digit_image] TF lookup failed")
return
if (self.in_contact(img)):
header = msg.header
header.frame_id = "world"
self.cloud_msg.header = header
self.cloud_msg.points.append(Point32(tf_digit_pos[0], tf_digit_pos[1], tf_digit_pos[2]))
self.cloud_pub.publish(self.cloud_msg)
# tf listener
tf_digit_ori = tf.transformations.quaternion_matrix(tf_digit_ori)[0:3, 0:3] # 4x4
tf_digit_pos, tf_digit_ori = np.array(tf_digit_pos), np.array(tf_digit_ori)
world_T_digit = np.eye(4)
world_T_digit[0:3, 0:3], world_T_digit[0:3,-1] = tf_digit_ori, tf_digit_pos
# digit_pos = np.array([0.,0.,0.011])
# digit_ori = np.array([[2.220446049250313e-16, -0.0, -1.0],
# [0.0, 1.0, -0.0], [1.0, 0.0, 2.220446049250313e-16]])
# T_digit = np.eye(4)
# T_digit[0:3, 0:3], T_digit[0:3,-1] = digit_ori, digit_pos
# placing digit in a new world frame
world_prime_T_digit = np.eye(4)
# object pose in new world frame
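            # world_prime_T_obj = world_prime_T_digit * inv(world_T_digit) * world_T_obj:
            # express the fixed object pose relative to the current digit pose, then place
            # it in a canonical frame where the digit sits at the identity.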
world_prime_T_obj = np.matmul(world_prime_T_digit, np.matmul(np.linalg.inv(world_T_digit), self.world_T_obj))
# write to file
T_digit = world_prime_T_digit
T_obj = world_prime_T_obj
obj_pos, obj_ori = T_obj[0:3,-1], T_obj[0:3,0:3]
digit_pos, digit_ori = T_digit[0:3,-1], T_digit[0:3,0:3]
self.save_episode_step(eps_idx=self.contact_episode_idx, step_idx=self.num_incontact, img_color=img,
obj_pos=obj_pos, obj_ori=obj_ori, digit_pos=digit_pos, digit_ori=digit_ori)
self.num_incontact = self.num_incontact + 1
else:
self.counter = self.counter + 1
# start new contact episode
if ((self.counter > 10) & (self.num_incontact > 1)):
if (self.num_incontact > self.min_num_incontact):
self.save_episode_dataset(eps_idx=self.contact_episode_idx)
self.contact_episode_idx = self.contact_episode_idx + 1
else:
self.data_list = []
self.counter = 0
self.num_incontact = 0
def main():
rospy.init_node('inhandpy_dataset_csv_writer', anonymous=True)
rospy.loginfo("Initialized inhandpy_dataset_csv_writer node.")
inhandpy_dataset_writer = InhandpyDatasetCSVWriter()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
if __name__ == '__main__':
main()
```
#### File: scripts/geometry/tactile_depth_to_cloud.py
```python
import numpy as np
import os
import hydra
import logging
from attrdict import AttrDict
import copy
import cv2
import open3d as o3d
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, ConcatDataset
import torchvision
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
import types
from inhandpy.dataio.sim_digit_data_loaders import DigitTactoImageTfSeqDataset
from inhandpy.utils import geom_utils, vis_utils
log = logging.getLogger(__name__)
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
CONFIG_PATH = os.path.join(BASE_PATH, "config/depth_normals_cloud.yaml")
def visualize_geometries(vis3d, seq_idx, pose_seq_obj, pose_seq_digit_top, pose_seq_digit_bot,
T_cam_offset, points3d, clouds, frames, meshes, params=None):
# transforms
T_obj_curr = geom_utils.Rt_to_T(pose_seq_obj[0][seq_idx, :], pose_seq_obj[1][seq_idx, :])
T_digit_top_curr = geom_utils.Rt_to_T(pose_seq_digit_top[0][seq_idx, :], pose_seq_digit_top[1][seq_idx, :])
T_digit_bot_curr = geom_utils.Rt_to_T(pose_seq_digit_bot[0][seq_idx, :], pose_seq_digit_bot[1][seq_idx, :])
T_cam_top_curr = torch.matmul(T_digit_top_curr, T_cam_offset)
T_cam_bot_curr = torch.matmul(T_digit_bot_curr, T_cam_offset)
# clouds
points3d_vis = points3d.cpu().detach().numpy()
clouds[0].points = o3d.utility.Vector3dVector(points3d_vis.transpose())
# meshes, frames
meshes = [copy.deepcopy(mesh) for mesh in meshes]
frames = [copy.deepcopy(frame) for frame in frames]
vis3d.transform_geometry_absolute([T_obj_curr, T_digit_top_curr, T_digit_bot_curr], meshes)
vis3d.transform_geometry_absolute([T_obj_curr, T_cam_top_curr, T_cam_bot_curr], frames)
vis3d.add_geometry(clouds)
vis3d.add_geometry(meshes)
vis3d.add_geometry(frames)
vis3d.render()
# vis3d.pan_scene()
vis3d.remove_geometry(clouds)
vis3d.remove_geometry(meshes)
vis3d.remove_geometry(frames)
def visualize_imgs(seq_idx, img_seq_depth, img_seq_normal, params=None, tpause=100):
img_depth = ((img_seq_depth[seq_idx, :, :, :]).permute(1, 2, 0)).cpu().detach().numpy()
img_normal = ((img_seq_normal[seq_idx, :, :, :]).permute(1, 2, 0)).cpu().detach().numpy()
img_depth = vis_utils.depth_to_color(img_depth)
cv2.imshow("img_depth", img_depth)
cv2.imshow("img_normal", img_normal)
cv2.waitKey(tpause)
def data_loader(dataset_names, datatype, params):
transform = transforms.Compose([transforms.ToTensor()])
dataset_list = []
for (ds_idx, dataset_name) in enumerate(dataset_names):
srcdir_dataset = f"{BASE_PATH}/local/datasets/{dataset_name}"
dataset_list.append(DigitTactoImageTfSeqDataset(
dir_dataset=f"{srcdir_dataset}/{datatype}", base_path=BASE_PATH, transform=transform,
downsample_imgs=params.downsample_imgs, img_types=params.img_types, digit_id=params.digit_id))
dataset = ConcatDataset(dataset_list)
dataloader = DataLoader(dataset, batch_size=params.batch_size,
shuffle=params.shuffle, num_workers=params.num_workers)
return dataloader, dataset
@hydra.main(config_path=CONFIG_PATH)
def main(cfg):
# data loader
train_dataloader, train_dataset = data_loader(
dataset_names=cfg.dataset_names, datatype="train", params=cfg.dataloader)
# init values
view_params = AttrDict({'fov': 60, 'front': [-0.56, 0.81, 0.14], 'lookat': [
-0.006, -0.0117, 0.043], 'up': [0.0816, -0.112, 0.990], 'zoom': 0.5})
vis3d = vis_utils.Visualizer3d(base_path=BASE_PATH, view_params=view_params)
T_cam_offset = torch.tensor(cfg.sensor.T_cam_offset)
proj_mat = torch.tensor(cfg.sensor.P)
gel_depth = torch.tensor(np.loadtxt(f"{BASE_PATH}/{cfg.sensor.gel_depth_map_loc}", delimiter=',') - cfg.sensor.gel_depth_offset)
# iterate over dataset
for ds_idx, dataset in enumerate(train_dataset):
print(f"Dataset idx: {ds_idx:03d}")
# load imgs: S x C x H x W , poses: (S x 3 x 3, S x 3) <-> (R, t)
img_seq_color, img_seq_depth, img_seq_normal, pose_seq_obj, pose_seq_digit_top, pose_seq_digit_bot = dataset
normals3d_seq = img_seq_normal.view(img_seq_normal.shape[0], img_seq_normal.shape[1], -1) # S x 3 x N
pose_seq_sensor = pose_seq_digit_bot if (cfg.dataloader.digit_id == "bot") else pose_seq_digit_top
S, C, H, W = img_seq_color.shape
# initialize open3d geometries
clouds = vis3d.init_geometry(geom_type="cloud", num_items=1)
frames = vis3d.init_geometry(geom_type="frame", num_items=3) # obj, digit_top, digit_bot
meshes = vis3d.init_geometry(geom_type="mesh", num_items=3, file_names=[
"textured_cube_rounded.obj", "digit.STL", "digit.STL"]) # obj, digit_top, digit_bot
meshes[0].scale(0.25 * 0.05, center=(0, 0, 0))
step = 1
seq_idx = 0
while (seq_idx < S):
print(f"Sequence idx: {seq_idx:03d}")
# get points, normals
normals3d = normals3d_seq[seq_idx, :, :]
img_depth = img_seq_depth[seq_idx, :, :]
# get transforms
T_sensor = geom_utils.Rt_to_T(R=pose_seq_sensor[0][seq_idx, :], t=pose_seq_sensor[1][seq_idx, :])
T_camera = torch.matmul(T_sensor, T_cam_offset)
T_obj = geom_utils.Rt_to_T(R=pose_seq_obj[0][seq_idx, :], t=pose_seq_obj[1][seq_idx, :])
# preprocess depth
img_depth = torch.flip(img_depth, dims=[0, 1])
img_depth[img_depth >= gel_depth] = 0
# inverse projection
points3d_world = geom_utils.depth_to_pts3d(depth=img_depth, P=proj_mat, V=torch.inverse(T_camera), params=cfg.sensor, ordered_pts=False)
# visualization
visualize_geometries(vis3d=vis3d, seq_idx=seq_idx, pose_seq_obj=pose_seq_obj, pose_seq_digit_top=pose_seq_digit_top,
pose_seq_digit_bot=pose_seq_digit_bot, T_cam_offset=T_cam_offset, points3d=points3d_world, clouds=clouds, frames=frames, meshes=meshes, params=cfg)
visualize_imgs(seq_idx, img_seq_depth, img_seq_normal, params=cfg)
if not vis3d.paused.value:
seq_idx = (seq_idx + step) % S
vis3d.clear_geometries()
vis3d.destroy()
if __name__ == '__main__':
main()
```
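The script above delegates the inverse projection to `geom_utils.depth_to_pts3d`. As a rough reference only, a generic pinhole back-projection is sketched below; the intrinsics and depth values are placeholders, and the library's actual routine (projection matrix `P`, view matrix `V`, OpenGL-style conventions) may differ in detail.
```python
import numpy as np

def backproject_depth(depth, fx, fy, cx, cy):
    """Back-project a HxW depth map into an (N, 3) point cloud in the camera frame."""
    h, w = depth.shape
    us, vs = np.meshgrid(np.arange(w), np.arange(h))
    valid = depth > 0                      # zeroed pixels (e.g. beyond the gel) are dropped
    z = depth[valid]
    x = (us[valid] - cx) * z / fx
    y = (vs[valid] - cy) * z / fy
    return np.stack([x, y, z], axis=-1)

# Toy usage with a made-up 4x4 depth map and intrinsics.
pts = backproject_depth(np.full((4, 4), 0.02), fx=100.0, fy=100.0, cx=2.0, cy=2.0)
print(pts.shape)  # (16, 3)
```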
#### File: scripts/img_translation/create_pix2pix_dataset.py
```python
import os
import glob
import pandas as pd
import numpy as np
from PIL import Image
import imageio
import logging
log = logging.getLogger(__name__)
def load_seq_from_file(csvfile):
seq_data = pd.read_csv(csvfile) if os.path.exists(csvfile) else None
if seq_data is not None:
seq_data = None if (len(seq_data) < 2) else seq_data
return seq_data
def main():
base_path = '/home/paloma/code/fair_ws/tactile-in-hand/inhandpy/'
# update src + dst dirs
# src_dataset_dir = f'{base_path}/local/datasets/tacto/sphere/dataset_0000/'
# src_dataset_dir = f'{base_path}/local/datasets/real/sphere/digit-0.5mm-ball-bearing-zup_2021-08-31-21-37-35/'
src_dataset_dir = f'{base_path}/local/datasets/real/cube/digit-flatcorner_2021-09-10-21-13-17/'
dst_dataset_dir = f'{base_path}/local/datasets/pix2pix/real_pyramid'
dataset_types = ['train', 'test']
dir_A = f"{dst_dataset_dir}/A/"
dir_B = f"{dst_dataset_dir}/B/"
dir_AB = f"{dst_dataset_dir}/AB/"
# iterate over train/test data
for dataset_type in dataset_types:
os.makedirs(f"{dir_A}/{dataset_type}", exist_ok=True)
os.makedirs(f"{dir_B}/{dataset_type}", exist_ok=True)
os.makedirs(f"{dir_AB}/{dataset_type}", exist_ok=True)
csvfiles = sorted(glob.glob(f"{src_dataset_dir}/{dataset_type}/**/*.csv"))
dst_img_idx = 0
# iterate over contact sequences
for eps_idx in range(9, len(csvfiles)):
csvfile = csvfiles[eps_idx]
seq_data = load_seq_from_file(csvfile)
if seq_data is None:
continue
print(f"Reading contact sequence from {csvfile}")
img_color_locs, img_normal_locs = None, None
if 'img_bot_color_loc' in seq_data: img_color_locs = seq_data[f"img_bot_color_loc"]
if 'img_color_loc' in seq_data: img_color_locs = seq_data[f"img_color_loc"]
if 'img_bot_normal_loc' in seq_data: img_normal_locs = seq_data[f"img_bot_normal_loc"]
# if 'img_normal_loc' in seq_data: img_normal_locs = seq_data[f"img_normal_loc"]
# iterate over images within each sequence
for img_idx in range(0, len(img_color_locs)):
img_color = Image.open(f"{src_dataset_dir}/{dataset_type}/{img_color_locs[img_idx]}")
if img_normal_locs is not None:
img_normal = Image.open(f"{src_dataset_dir}/{dataset_type}/{img_normal_locs[img_idx]}")
else:
img_normal = img_color
img_normal = np.zeros_like(img_color)
img_normal[:, :, 2] = 255
imageio.imwrite(f"{dir_A}/{dataset_type}/{dst_img_idx:04d}.png", img_color)
imageio.imwrite(f"{dir_B}/{dataset_type}/{dst_img_idx:04d}.png", img_normal)
dst_img_idx = dst_img_idx + 1
break # single episode
os.system(f"python scripts/img_translation/combine_A_and_B.py --fold_A {dir_A} --fold_B {dir_B} --fold_AB {dir_AB}")
log.info(f"Created tactile dataset of {dst_img_idx} images at {dst_dataset_dir}.")
if __name__=='__main__':
main()
```
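A small sanity check for the output of the script above: color image i of the selected episode goes to `A/<split>/iiii.png` and its normal map to `B/<split>/iiii.png`, and the downstream `combine_A_and_B.py` step pairs them by filename (its side-by-side concatenation behaviour is an assumption based on the standard pix2pix tooling; the dataset path below is a placeholder).
```python
import glob
import os

dst_dataset_dir = "local/datasets/pix2pix/real_pyramid"  # placeholder path
for split in ("train", "test"):
    dir_a = os.path.join(dst_dataset_dir, "A", split)
    dir_b = os.path.join(dst_dataset_dir, "B", split)
    for path_a in sorted(glob.glob(os.path.join(dir_a, "*.png"))):
        path_b = os.path.join(dir_b, os.path.basename(path_a))
        assert os.path.exists(path_b), f"missing B-side image for {path_a}"
```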
#### File: scripts/img_translation/train_test_mlp_model.py
```python
import os
import imageio
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
from inhandpy.thirdparty.pix2pix.options.test_options import TestOptions
from inhandpy.thirdparty.pix2pix.options.train_options import TrainOptions
from inhandpy.thirdparty.pix2pix.data import create_dataset
from inhandpy.utils import vis_utils, data_utils
import matplotlib.pyplot as plt
plt.ion()
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
class MLPNetwork(nn.Module):
def __init__(
self, input_size=5, output_size=3, hidden_size=32):
super().__init__()
self.fc1 = nn.Linear(input_size, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
def visualize_vector(img_vec, title=''):
img_mat = img_vec.permute(1, 0) # n x 3 -> 3 x n
img_mat = img_mat.reshape(3, 256, 256) # 3 x n -> 3 x H x W
plt.imshow(img_mat.permute(1,2,0).cpu().detach().numpy())
plt.title(title)
plt.show()
plt.pause(1e-1)
def vector_to_img(img_vec, img_numpy=False):
img_mat = img_vec.permute(1, 0) # n x 3 -> 3 x n
img_mat = img_mat.reshape(3, 256, 256) # 3 x n -> 3 x H x W
if img_numpy: img_mat = img_mat.permute(1,2,0).cpu().detach().numpy()
return img_mat
def update_img_dict(img_dict, inputs, outputs, labels):
img_color = (vector_to_img(img_vec=inputs[:, 0:3]) + 1) / 2.0
img_normal_gt = (vector_to_img(img_vec=labels) + 1) / 2.0
img_normal_pred = (vector_to_img(img_vec=outputs) + 1) / 2.0
img_list = [img_color, img_normal_gt, img_normal_pred]
idx = 0
for key in img_dict:
img = img_list[idx]
img = data_utils.interpolate_img(img=img, rows=160, cols=120)
img_dict[key].append(transforms.ToPILImage()(img))
idx = idx + 1
return img_dict
def save_video(img_dict, dstdir, dataset_name, fps=15):
print("Writing img dict outputs as videos to: {0}".format(dstdir))
for key in img_dict:
vidfile = f"{dstdir}/{dataset_name}_{key}.mp4"
imageio.mimwrite(vidfile, img_dict[key], fps=fps)
def preproc_data(data):
# read data
color = data['A'].squeeze(0)
normal = data['B'].squeeze(0)
# compute network input
ch, rows, cols = color.shape
row_loc_mat = (torch.arange(0, cols)).repeat(rows, 1) / cols
col_loc_mat = (torch.arange(0, rows).unsqueeze(-1)).repeat(1, cols) / rows
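    # channels 3 and 4 below carry the normalized pixel coordinates (values in [0, 1)),
    # so the per-pixel MLP knows where on the sensor each RGB sample comes from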
inputs = torch.zeros((5, rows, cols))
inputs[0:3, :] = color
inputs[3, :] = row_loc_mat
inputs[4, :] = col_loc_mat
# reshape input / labels
inputs = inputs.reshape((5, -1))
labels = normal.reshape((3, -1))
inputs = inputs.permute(1,0) # 5 x n -> n x 5
labels = labels.permute(1,0) # 3 x n -> n x 3
return inputs, labels
def train_model(dataset, n_epochs=500, opt=None):
model = MLPNetwork()
criterion = torch.nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(n_epochs):
loss_vec = torch.zeros(len(dataset))
for i, data in enumerate(dataset):
inputs, labels = preproc_data(data)
# clear gradients
optimizer.zero_grad()
# forward, backward, optimize
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
loss_vec[i] = loss.item()
print(f'epoch: {epoch}, loss: {torch.mean(loss_vec)}')
# save model
    torch.save(model.state_dict(), f"{BASE_PATH}/local/pix2pix/checkpoints/mlp")
def test_model(dataset, opt=None):
model = MLPNetwork()
criterion = torch.nn.MSELoss()
    model.load_state_dict(torch.load(f"{BASE_PATH}/local/pix2pix/checkpoints/mlp"))
model.eval()
loss_vec = torch.zeros(len(dataset))
img_dict = {}
img_dict['color'], img_dict['normal_gt'], img_dict['normal_pred'] = [], [], []
for i, data in enumerate(dataset):
inputs, labels = preproc_data(data) # n x 5, n x 3
outputs = model.forward(inputs) # n x 3
## collect imgs to be saved to file as a video
img_dict = update_img_dict(img_dict, inputs, outputs, labels)
## visualize predictions
# fig1, axs1 = plt.subplots(nrows=1, ncols=3, num=1, clear=True, figsize=(22, 8))
# img_color_np = (vector_to_img(img_vec=inputs[:, 0:3]) + 1) / 2.0
# img_normal_gt_np = (vector_to_img(img_vec=labels) + 1) / 2.0
# img_normal_pred_np = (vector_to_img(img_vec=outputs) + 1) / 2.0
# vis_utils.visualize_imgs(fig=fig1, axs=[axs1[0], axs1[1], axs1[2]],
# img_list=[img_color_np, img_normal_gt_np, img_normal_pred_np],
# titles=['img_color', 'img_normal_gt', 'img_normal_pred'], cmap='coolwarm')
# plt.pause(1e-3)
loss_vec[i] = criterion(outputs, labels)
# save img lists as video to file
dstdir_vid=f"{BASE_PATH}/local/pix2pix/qual_videos/mlp"
os.makedirs(dstdir_vid, exist_ok=True)
dataset_name = opt.dataroot.split('/')[-2]
save_video(img_dict, dstdir=dstdir_vid, dataset_name=dataset_name)
print(f"test mse reconstruction loss: {torch.mean(loss_vec)}")
def main():
mode = 'test' # 'train', 'test'
if (mode == 'train'): opt = TrainOptions().parse()
if (mode == 'test'): opt = TestOptions().parse()
opt.num_threads = 0
opt.batch_size = 1
opt.serial_batches = True
opt.no_flip = True
opt.display_id = -1
dataset = create_dataset(opt)
if (mode == 'train'): train_model(dataset, opt=opt)
if (mode == 'test'): test_model(dataset, opt=opt)
if __name__ == '__main__':
main()
```
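For orientation, a minimal sketch of how the per-pixel `MLPNetwork` defined above consumes data: every pixel becomes one row of 5 features (RGB plus the two normalized coordinate channels) and the network regresses a 3-vector per pixel. The tensors are random placeholders, not sensor data.
```python
import torch

# Assumes the MLPNetwork class defined in the script above is in scope.
inputs = torch.rand(256 * 256, 5)             # 256x256 image flattened to rows of [r, g, b, row, col]
model = MLPNetwork(input_size=5, output_size=3, hidden_size=32)
with torch.no_grad():
    normals = model(inputs)                   # (65536, 3) predicted normals, one per pixel
img_normal = normals.permute(1, 0).reshape(3, 256, 256)
print(img_normal.shape)                       # torch.Size([3, 256, 256])
```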
#### File: inhandpy/utils/logger.py
```python
import numpy as np
import pandas as pd
import logging
log = logging.getLogger(__name__)
class Logger:
def __init__(self, params=None):
self.params = params
self.dataframe = pd.DataFrame()
def get_data(self):
return self.dataframe
def log_val(self, names, vals, index_val, index_name=None):
data_dict = {}
for name, val in zip(names, vals):
data_dict[name] = [val]
data_row = pd.DataFrame(data_dict, index=[index_val], dtype=object)
if index_name is not None: data_row.index.name = index_name
dfs = [data_row] if self.dataframe.empty else [self.dataframe, data_row]
self.dataframe = pd.concat([df.stack() for df in dfs], axis=0).unstack()
def set_index(self, index_vals):
self.dataframe = self.dataframe.set_index(index_vals)
def write_data_to_file(self, csvfile, verbose=False):
if verbose: log.info(f"Saving logged data to {csvfile}")
self.dataframe.to_csv(csvfile)
``` |
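A short usage sketch for the Logger helper above; the column names and values are made up.
```python
run_log = Logger()
run_log.log_val(names=["status", "runtime"], vals=["ok", 1.23],
                index_val="instance_a", index_name="problem")
run_log.log_val(names=["status", "runtime"], vals=["timeout", 3600.0],
                index_val="instance_b", index_name="problem")
run_log.write_data_to_file("results.csv", verbose=True)  # one row per problem, one column per logged name
```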
{
"source": "1henno1/vcs-automat-misc",
"score": 2
} |
#### File: vcs-automat-misc/server/create_plots.py
```python
import os,sys,inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, current_dir)
import time
import configparser
import logging
import numpy as np
from datetime import datetime, date, timedelta
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import mysql.connector
from mysql.connector import errorcode
# try to set up a german locale
import locale
for lang in ('de_DE', 'de_DE.utf8', 'de_CH', 'de_CH.utf8'):
try:
locale.setlocale(locale.LC_ALL, lang)
break
except Exception:
pass
# globals for plot formatting
YEAR = datetime.now().year
SIZE = (6, 4.5)
COLOR = 'gray'
# set up logging config
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s\t%(levelname)s\t[%(name)s: %(funcName)s]\t%(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
handlers=[logging.FileHandler("logs/create_plots.log"), logging.StreamHandler()])
# setting of global minimum logging level
logging.disable(logging.DEBUG)
# set-up for logging of main. Level options: DEBUG, INFO, WARNING, ERROR, CRITICAL
loglevel = logging.DEBUG
logtitle = 'main'
logger = logging.getLogger(logtitle)
logger.setLevel(loglevel)
# start of script
logger.info('started at '+time.strftime('%d.%m.%y, %H:%M'))
# read config
config = configparser.ConfigParser()
config.read('settings.ini')
# set up paths for the wordpress plugin and image directory
plugin_path = str(config['general']['wordpress_plugin_dir'])
img_path = plugin_path+"/img"
if os.path.isdir(img_path) is False:
logger.error('Image directory for wordpress plugin not found at '+img_path)
sys.exit()
# set up database connection
try:
dbcn = mysql.connector.connect(user=str(config['general']['mysql_user']), password=str(config['general']['mysql_password']), host='localhost', database='vcs_automat')
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Username or password wrong")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
sys.exit()
# read archive data from the database, each entry corresponds to one drink having been released
db = dbcn.cursor()
db.execute("SELECT unixtime FROM archive")
raw_data = [unixtime for (unixtime,) in db]
dbcn.close()
# process and combine raw data into bins corresponding to hours, weekdays, weeks and years
data = [{'hour': int(i.strftime('%H')), 'weekday': i.weekday(), 'week': int(i.strftime('%W')), 'year': int(i.strftime('%Y'))} for i in map(datetime.fromtimestamp, raw_data)]  # weekday() counts from 0 = Monday, matching the weekday labels below
hours = np.array([sum(entry.get('hour') == i for entry in data) for i in range(0,23+1)]) # all data is used
weekdays = np.array([sum(entry.get('weekday') == i for entry in data) for i in range(0,6+1)]) # all data is used
weeks = np.array([sum(entry.get('year') == YEAR and entry.get('week') == j for entry in data) for j in range(0,56+1)]) # only this year's data is used
# function to format the title
def set_title(plt, text):
plt.suptitle(text)
plt.title('aktualisiert '+time.strftime('%d.%m.%y, %H:%M'), fontdict={'fontsize': 8}, color='gray')
# define axis formatter for months
months = mdates.MonthLocator()
monthsFmt = mdates.DateFormatter('1. %b')
#
# PLOT BASED ON WEEKDAYS
#
if sum(weekdays) != 0:
fig, ax = plt.subplots()
fig.set_size_inches(SIZE)
weekdays_title = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']
ax.bar(weekdays_title, weekdays/sum(weekdays)*100, color=COLOR)
fig.autofmt_xdate()
plt.ylabel('relativer Konsum [%]')
set_title(plt, 'Durchschnittlicher Konsum nach Wochentag')
plt.savefig('img/weekday.svg', transparent=True)
plt.savefig(img_path+'/weekday.svg', transparent=True)
plt.close()
#
# PLOT BASED ON HOUR OF THE DAY
#
if sum(hours) != 0:
fig, ax = plt.subplots()
fig.set_size_inches(SIZE)
ax.bar(range(0,23+1), hours/sum(hours)*100, align='edge', color=COLOR)
ax.set_xlim(0, 24)
ax.set_xticks([1,2,4,5,7,8,10,12,14,16,17,19,20,22,23], minor=True)
plt.xticks((0,3,6,9,12,15,18,21,24))
plt.ylabel('relativer Konsum [%]')
    set_title(plt, 'Durchschnittlicher Konsum nach Uhrzeit')
plt.savefig(img_path+'/hour.svg', transparent=True)
plt.close()
#
# PLOT OF WEEKLY CONSUMPTION THIS YEAR
#
fig, ax = plt.subplots()
fig.set_size_inches(SIZE)
first_day = date(YEAR, 1, 1)
last_day = date(YEAR, 12, 31)
# figure out which date the monday of the first week of the current year falls on, so that the weeknumbers are correctly defined
monday_of_week_zero = first_day - timedelta(days = first_day.weekday())
weeknumber_start = 0 if first_day.weekday() > 0 else 1
weeknumber_end = int(last_day.strftime('%W'))
x = range(int(mdates.date2num(monday_of_week_zero)), int(mdates.date2num(last_day))+1, 7)
y = weeks[weeknumber_start:weeknumber_end+1]
ax.bar(x, y, width=6, align='edge', color=COLOR)
ax.xaxis.set_major_locator(mdates.MonthLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('1. %b'))
fig.autofmt_xdate()
ax.set_xlim(int(mdates.date2num(datetime(YEAR, 1, 1))), int(mdates.date2num(datetime(YEAR, 12, 31))))
plt.ylabel('Gesamtkonsum')
set_title(plt, 'Wöchentlicher Konsum in '+str(YEAR))
plt.savefig(img_path+'/year_'+str(YEAR)+'.svg', transparent=True)
plt.close()
``` |
{
"source": "1hoen/ipet",
"score": 2
} |
#### File: ipet/evaluation/IPETFilter.py
```python
import xml.etree.ElementTree as ElementTree
import numpy as np
from ipet.concepts import IpetNode, IpetNodeAttributeError
import logging
import pandas as pd
from ipet.evaluation import TestSets
logger = logging.getLogger(__name__)
class IPETValue(IpetNode):
nodetag = "Value"
def __init__(self, name = None, active = True):
"""
constructs an Ipet Instance
Parameters
----------
name : The name of this problem
active : True or "True" if this element should be active, False otherwise
"""
super(IPETValue, self).__init__(active)
self.name = name
def checkAttributes(self):
if self.name is None:
raise IpetNodeAttributeError("name", "No name specified")
return True
def getEditableAttributes(self):
return ["name"] + super(IPETValue, self).getEditableAttributes()
@staticmethod
def getNodeTag():
return IPETValue.nodetag
def getName(self):
return self.name
def getValue(self, dtype = None):
if dtype is None:
for mytype in [int, float, str]:
try:
return mytype(self.name)
except ValueError:
continue
elif dtype != object:
return dtype.type(self.name)
return self.name
def toXMLElem(self):
me = ElementTree.Element(IPETValue.getNodeTag(), self.attributesToStringDict())
return me
class IPETComparison:
"""
comparison operators for filters. All standard binary comparisons + float comparisons (with tolerance)
+ percentage based inequality
"""
comparisondict = {
"le":"le",
"lt":"lt",
"gt":"gt",
"ge":"ge",
"eq":"eq",
"neq":"neq"
}
def __init__(self, operator):
"""
constructs a comparison object by passing an appropriate operator as string
"""
if str(operator) in IPETComparison.comparisondict:
self.operator = str(operator)
else:
raise KeyError("Unknown key value %s" % (operator))
def compare(self, x, y):
method = getattr(self, "method_" + IPETComparison.comparisondict[self.operator])
try:
return method(x, y)
except TypeError as t:
logger.error("Got type error %s comparing elements x:%s and y:%s" % (t, x, y))
return 0
def method_le(self, x, y):
return x <= y
def method_lt(self, x, y):
return x < y
def method_ge(self, x, y):
return x >= y
def method_gt(self, x, y):
return x > y
def method_eq(self, x, y):
return x == y
def method_neq(self, x, y):
return x != y
class IPETFilter(IpetNode):
"""
Filters are used for selecting subsets of problems to analyze.
"""
valueoperators = ["keep", "drop"]
listoperators = ["diff", "equal"]
attribute2Options = {
"anytestrun":["one", "all"],
"operator":list(IPETComparison.comparisondict.keys())
+valueoperators
+listoperators}
nodetag = "Filter"
DEFAULT_ANYTESTRUN = 'all'
_storedcol_ = None
_storeddf_ = None
def __init__(self, expression1 = None, expression2 = None, operator = "ge", anytestrun = DEFAULT_ANYTESTRUN, active = True, datakey = None):
"""
filter constructor
Parameters
----------
expression1 : integer, float, string, or column name
expression2 : integer, float, string, or column name
datakey : available data key for drop and keep filters
operator : operator such that evaluation expression1 op expression2 yields True or False
anytestrun : either 'one' or 'all'
active : True or "True" if this filter should be active, False otherwise
"""
super(IPETFilter, self).__init__(active)
self.expression1 = expression1
self.expression2 = expression2
self.anytestrun = anytestrun
self.values = []
self._updatevalueset = False
self.set_operator(operator)
self.datakey = datakey
def checkAttributes(self):
if self.operator in self.valueoperators and self.values == []:
raise IpetNodeAttributeError("operator", "Trying to use a filter with operator {0} and empty value set".format(self.operator))
        if self.operator in self.valueoperators and (self.datakey is None or self.datakey == ""):
raise IpetNodeAttributeError("datakey", "Trying to use a filter with operator '{}' and unspecified data key '{}'".format(self.operator, self.datakey))
if self.anytestrun not in self.attribute2Options["anytestrun"]:
raise IpetNodeAttributeError("anytestrun", "Wrong attribute {} passed as 'anytestrun' property. Should be in {}".format(self.anytestrun, self.attribute2Options["anytestrun"]))
return True
@staticmethod
def fromDict(attrdict):
expression1 = attrdict.get('expression1')
expression2 = attrdict.get('expression2')
anytestrun = attrdict.get('anytestrun', IPETFilter.DEFAULT_ANYTESTRUN)
operator = attrdict.get('operator')
datakey = attrdict.get('datakey')
active = attrdict.get('active', True)
return IPETFilter(expression1, expression2, operator, anytestrun, active, datakey)
@staticmethod
def processXMLElem(elem):
"""
inspect and process an xml element
"""
elemdict = dict(elem.attrib)
# filter_ must be written with a trailing underscore "_" not to conflict with the filter method of Python
filter_ = IPETFilter.fromDict(elemdict)
# add values one by one
for child in elem:
# catch wrong children
if not child.tag == IPETValue.getNodeTag():
raise AttributeError("Cannot add a child of type {} to a Filter".format(child.tag))
instancename = child.attrib.get("name")
if instancename:
filter_.addChild(IPETValue(instancename))
# check the filter attributes
filter_.checkAttributes()
return filter_
def getName(self):
prefix = self.anytestrun
if self.operator in self.valueoperators:
return "{} value filter (key: {})".format(self.operator, self.datakey)
elif self.operator in self.listoperators:
return "{}-{} list filter (key: {})".format(self.anytestrun, self.operator, self.datakey)
else:
return " ".join(map(str, (prefix, self.expression1, self.operator, self.expression2)))
def set_operator(self, operator):
self.operator = operator
if self.operator in list(IPETComparison.comparisondict.keys()):
self.comparison = IPETComparison(self.operator)
def getEditableAttributes(self):
"""
returns editable attributes depending on the selected operator
if a binary operator is selected, two expressions as left and right hand side of operator must be chosen
For problem operators, no expressions are selectable.
"""
parenteditables = super(IPETFilter, self).getEditableAttributes()
if self.operator in list(IPETComparison.comparisondict.keys()):
return parenteditables + ['operator', 'anytestrun', 'expression1', 'expression2']
else:
return parenteditables + ['operator', 'anytestrun', 'datakey']
@staticmethod
def getNodeTag():
return IPETFilter.nodetag
def getChildren(self):
return self.values
def acceptsAsChild(self, child):
return child.__class__ is IPETValue
def addChild(self, child):
self.values.append(child)
self._updatevalueset = True
def removeChild(self, child):
self.values.remove(child)
self._updatevalueset = True
def getActiveValues(self):
return [x for x in self.values if x.isActive()]
def getRequiredOptionsByAttribute(self, attr):
return self.attribute2Options.get(attr, super(IPETFilter, self).getRequiredOptionsByAttribute(attr))
def checkAndUpdateValueSet(self, dtype = None):
"""Update the value set of this filter if necessary
"""
if not self._updatevalueset:
return
self.valueset = set([x.getValue(dtype) for x in self.getActiveValues()])
updateset = set()
#
# check for test set names among the values
#
for i in self.valueset:
if i in TestSets.getTestSets():
logger.debug("Adding test set {} to value set".format(i))
updateset = updateset.union(set(TestSets.getTestSetByName(i)))
self.valueset = self.valueset.union(updateset)
logger.debug("Complete value set of filter {}:\n{}".format(self.getName(), self.valueset))
self._updatevalueset = False
def applyValueOperator(self, df):
dtype = df.dtypes[0]
self.checkAndUpdateValueSet(dtype)
contained = df.isin(self.valueset)
logger.debug("Contained: {}\nData: {}".format(contained, df))
if self.operator == "keep":
return contained
else:
return ~contained
def isAllDiff(self, x):
valueset = set()
for x_i in x:
if x_i in valueset:
return False
valueset.add(x_i)
return True
def isOneEqual(self, x):
return not self.isAllDiff(x)
def isAllEqual(self, x):
first_x = x.iloc[0]
for x_i in x:
if first_x != x_i:
return False
return True
def isOneDiff(self, x):
return not self.isAllEqual(x)
def applyListOperator(self, df, groupindex):
"""
Apply list operators 'diff' and 'equal' to the datakey.
In combination with the 'anytestrun' attribute, there are
four possibilities in total:
        | anytestrun | operator | result                                                     |
        |------------|----------|------------------------------------------------------------|
        | one        | diff     | True, if there are at least 2 different values in a group  |
        | all        | diff     | True, if all values are different in this group            |
        | one        | equal    | True, if at least one value occurs twice in a group        |
        | all        | equal    | True, if there is only a single value for this group       |
"""
#
# 1. chose the right list function
#
if self.operator == "diff":
if self.anytestrun == "one":
fun = self.isOneDiff
else:
fun = self.isAllDiff
if self.operator == "equal":
if self.anytestrun == "one":
fun = self.isOneEqual
else:
fun = self.isAllEqual
#
# 2. store the original index
#
dfindex = df.set_index(groupindex).index
#
# 3. group by the index and apply the list function
#
f_by_group = df.groupby(groupindex)[self.datakey].apply(fun)
#
# 4. reindex the result to match the original data frame row count
#
f_by_group_as_frame = pd.DataFrame(f_by_group.reindex(index = dfindex, axis = 0))
#
# 5. set the index of the frame to match the original frame's index
#
f_by_group_as_frame.set_index(df.index, inplace = True)
return f_by_group_as_frame
def filterProblem(self, probname, testruns = []):
"""
return True or False depending on the evaluation of the filter operator comparison
"""
        # apply a problem operator directly
if self.operator in self.valueoperators:
return self.applyValueOperator(probname)
# evaluate the two expressions and filter according to the anytestrun attribute if one or all match the requirement
for testrun in testruns:
x = self.evaluate(self.expression1, probname, testrun)
y = self.evaluate(self.expression2, probname, testrun)
if self.anytestrun == 'one' and self.comparison.compare(x, y):
return True
elif self.anytestrun == 'all' and not self.comparison.compare(x, y):
return False
if self.anytestrun == 'one':
return False
return True
def storeResult(self, df : pd.DataFrame, filtercol : pd.Series):
"""store a filter result for future reuse
"""
self._storeddf_ = df
self._storedcol_ = filtercol
def getStoredResult(self, df : pd.DataFrame):
"""return the already stored result for this data frame
"""
if self._storeddf_ is df:
return self._storedcol_
else:
return None
def applyFilter(self, df, groupindex = None):
"""Apply the filter to a data frame rowwise
Parameters
----------
df : DataFrame
data frame object containing columns 'expression1' and 'expression2' or 'datakey'
depending on the selected operator
groupindex : list or None
either a list of columns that should be used for groupby operations
(only needed for list operators 'equal' and 'diff')
Returns
-------
booleanseries :
"""
if self.operator in self.listoperators:
filtercol = self.applyListOperator(df, groupindex)
elif self.operator in self.valueoperators:
filtercol = self.applyValueOperator(df[[self.datakey]])
else:
x = self.evaluateValueDataFrame(df, self.expression1)
y = self.evaluateValueDataFrame(df, self.expression2)
try:
x.columns = ["comp"]
except:
pass
try:
y.columns = ["comp"]
except:
pass
filtercol = self.comparison.compare(x, y)
if groupindex is None:
return filtercol
dfindex = df.set_index(groupindex).index
renaming = {i:"{}_filter".format(i) for i in groupindex}
filtercol = filtercol.rename(renaming, axis = 1)
filtercol.index = dfindex
# group the filter by the specified data frame index columns.
if self.anytestrun == "one":
func = np.any
elif self.anytestrun == "all":
func = np.all
fcol_index = filtercol.groupby(filtercol.index).apply(func)
#
# reshape the column to match the original data frame rows
#
fcol = fcol_index.reindex(index = dfindex, axis = 0)
return fcol
def getNeededColumns(self, df):
return [exp for exp in [self.expression1, self.expression2, self.datakey] if exp in df.columns]
def evaluateValueDataFrame(self, df, value):
if value in df.columns:
return df[[value]]
else:
for conversion in [int, float, str]:
try:
return conversion(value)
except ValueError:
pass
return value
def evaluate(self, value, probname, testrun):
if value in testrun.getKeySet():
return testrun.getProblemDataById(probname, value)
else:
for conversion in [int, float, str]:
try:
return conversion(value)
except ValueError:
pass
return value
def filterProblems(self, probnames, testruns = []):
return [self.filterProblem(probname, testruns) for probname in probnames]
def getFilteredList(self, probnames, testruns = []):
return [probname for probname in probnames if self.filterProblem(probname, testruns)]
def toXMLElem(self):
me = ElementTree.Element(IPETFilter.getNodeTag(), self.attributesToStringDict())
for value in self.values:
me.append(value.toXMLElem())
return me
def getDependency(self, i):
if i == 1:
value = self.expression1
else:
value = self.expression2
try:
float(value)
except:
return value
return None
def equals(self, other):
"""Compare this and another filter for equality
"""
if not IpetNode.equals(self, other):
return False
if self.operator == other.operator:
if self.operator not in IPETFilter.valueoperators:
return True
if len(self.values) != len(other.values):
return False
for v1, v2 in zip(self.values, other.values):
if not v1.equals(v2):
return False
return True
class IPETFilterGroup(IpetNode):
"""
represents a list of filters, has a name attribute for quick tabular representation
a filter group collects
"""
nodetag = "FilterGroup"
attribute2options = {"filtertype":["union", "intersection"]}
editableAttributes = ["name", "filtertype"]
# global data frame and index that are reused for several filter groups
_glbdf_ = None
_glbindex_ = None
# intersection row index which can be shared across all filter groups of type "intersection"
_glbinterrows_ = None
def __init__(self, name = None, filtertype = "intersection", active = True):
"""
constructor for a filter group
Parameters:
----------
name : a suitable name for the filter group
filtertype : either 'union' or 'intersection'
active : True or "True" if this filter group should be active, False otherwise
"""
super(IPETFilterGroup, self).__init__(active)
self.name = name
self.filters = []
if filtertype not in ["intersection", "union"]:
raise ValueError("Error: filtertype <%s> must be either 'intersection' or 'union'" % filtertype)
self.filtertype = filtertype
def getEditableAttributes(self):
return super(IPETFilterGroup, self).getEditableAttributes() + self.editableAttributes
def getChildren(self):
return self.filters
def addChild(self, child):
self.addFilter(child)
def acceptsAsChild(self, child):
return child.__class__ is IPETFilter
def removeChild(self, child):
self.filters.remove(child)
@staticmethod
def getNodeTag():
return IPETFilterGroup.nodetag
def getRequiredOptionsByAttribute(self, attr):
return self.attribute2options.get(attr, super(IPETFilterGroup, self).getRequiredOptionsByAttribute(attr))
def addFilter(self, filter_):
"""
add a filter to the list of filters.
Parameters
----------
        filter_ : an instance of IPETFilter
"""
self.filters.append(filter_)
def getName(self):
return self.name
def getActiveFilters(self):
return [f for f in self.filters if f.isActive()]
@staticmethod
def setGlobalDataFrameAndIndex(df : pd.DataFrame, index):
"""Set global data frame and index for filtering, that will be reused by each filter group
"""
IPETFilterGroup._glbdf_ = df
IPETFilterGroup._glbindex_ = index
IPETFilterGroup._glbinterrows_ = IPETFilterGroup.computeIntersectionRows(df, index)
@staticmethod
def computeIntersectionRows(df : pd.DataFrame, index):
"""Compute intersection rows of a given data frame and index
Intersection rows denote a boolean index to define the subset of rows of the input frame
considered as "intersection" for filter groups that have the "intersection" type (which is the default).
The intersection row computation is slow and is reused across all
filter groups with the intersection type.
"""
logger.info("Computing rows for intersection groups")
dfindex = df.set_index(index).index
groups = df.groupby(index)
instancecount = groups.apply(len).max()
interrows = groups.apply(lambda x:len(x) == instancecount)
return interrows.reindex(dfindex)
def filterDataFrame(self, df, index):
"""
filters a data frame object as the intersection of all values that match the criteria defined by the filters
"""
activefilters = self.getActiveFilters()
# treat the special case to keep everything quickly
if len(activefilters) == 0 and self.filtertype == "union":
return df
# first, get the highest number of index occurrences. This number must be matched to keep the problem
if self.filtertype == "intersection":
if df is IPETFilterGroup._glbdf_:
intersection_index = IPETFilterGroup._glbinterrows_
else:
intersection_index = IPETFilterGroup.computeIntersectionRows(df, index)
elif self.filtertype == "union":
intersection_index = None
for f_ in activefilters:
# check first if the column has already been stored
storedcol = f_.getStoredResult(df)
if storedcol is not None:
fcol = storedcol
else:
# apply the filter to the data frame rowwise and store the result in a temporary boolean column
fcol = f_.applyFilter(df, index)
if intersection_index is not None:
intersection_index = intersection_index & fcol
else:
intersection_index = fcol
#
# aggregate the single, elementwise filters into a single intersection
# series with one row per index element
#
# intersection_index = pd.concat(index_series, axis = 1).apply(np.all, axis = 1)
lvalues = intersection_index.values
return lvalues
def filterProblem(self, probname, testruns = []):
for filter_ in self.getActiveFilters():
if not filter_.filterProblem(probname, testruns):
return False
return True
def getNeededColumns(self, df):
needed = []
for filter_ in self.filters:
needed += filter_.getNeededColumns(df)
return needed
def toXMLElem(self):
me = ElementTree.Element('FilterGroup', self.attributesToStringDict())
for filter_ in self.filters:
me.append(filter_.toXMLElem())
return me
@staticmethod
def processXMLElem(elem):
"""
inspect and process an xml element
"""
if elem.tag == IPETFilterGroup.getNodeTag():
filtergroup = IPETFilterGroup(**elem.attrib)
for child in elem:
filtergroup.addFilter(IPETFilterGroup.processXMLElem(child))
return filtergroup
elif elem.tag == IPETFilter.getNodeTag():
return IPETFilter.processXMLElem(elem)
@staticmethod
def fromXML(xmlstring):
"""
parse an xml string matching the filter group XML syntax
"""
tree = ElementTree.fromstring(xmlstring)
return IPETFilterGroup.processXMLElem(tree)
@staticmethod
def fromXMLFile(xmlfilename):
"""
parse a file containing an xml string matching the filter group XML representation syntax
"""
tree = ElementTree.parse(xmlfilename)
return IPETFilterGroup.processXMLElem(tree.getroot())
``` |
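For reference, a hedged sketch of the XML layout that `IPETFilterGroup.fromXML` accepts, using only the node tags and attributes defined in this module; the attribute values are illustrative, not taken from a real evaluation file.
```python
filtergroup_xml = """
<FilterGroup name="cleaninstances" filtertype="intersection" active="True">
    <Filter anytestrun="all" operator="keep" datakey="ProblemName" active="True">
        <Value name="instance1"/>
        <Value name="instance2"/>
    </Filter>
</FilterGroup>
"""
group = IPETFilterGroup.fromXML(filtergroup_xml)
print(group.getName())  # cleaninstances
```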
{
"source": "1ibrary/1ibrary-gzhu",
"score": 2
} |
#### File: 1ibrary/1ibrary-gzhu/manage.py
```python
from yitu import create, db
from flask_migrate import MigrateCommand, Migrate
from flask_script import Manager, Shell
import os
app = create(os.getenv("yitu_cfg") or "default")
@app.teardown_request
def teardown(e):
if e:
db.session.rollback()
db.session.remove()
else:
db.session.commit()
db.session.remove()
manage = Manager(app)
migrate = Migrate(app, db)
manage.add_command("db", MigrateCommand)
manage.add_command("shell", Shell(make_context=lambda: {"app": app, "db": db}))
if __name__ == "__main__":
manage.run()
```
#### File: migrations/versions/525c4fa30db4_.py
```python
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '525c4fa30db4'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('hot_books',
sa.Column('book_id', sa.Integer(), nullable=False),
sa.Column('book_author', sa.Text(), nullable=True),
sa.Column('book_cover', sa.Text(), nullable=True),
sa.Column('book_rate', sa.Integer(), nullable=True),
sa.Column('book_content', sa.Text(), nullable=True),
sa.Column('book_publish', sa.Text(), nullable=True),
sa.Column('book_last_number', sa.Integer(), nullable=True),
sa.Column('book_key', sa.String(length=13), nullable=True),
sa.Column('book_db_id', sa.Integer(), nullable=True),
sa.Column('book_title', sa.Text(), nullable=True),
sa.Column('book_place', sa.Text(), nullable=True),
sa.Column('detail_data', sa.Text(), nullable=True),
sa.Column('hot_id', sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint('book_id'),
sa.UniqueConstraint('book_db_id')
)
op.create_index(op.f('ix_hot_books_book_key'), 'hot_books', ['book_key'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_hot_books_book_key'), table_name='hot_books')
op.drop_table('hot_books')
# ### end Alembic commands ###
```
#### File: resources/books/main.py
```python
import json
import jpush
from flask_restful import Resource
from yitu import db
from yitu.models.book import Book, HotBook as HBModel
from yitu.models.hot_search import HotSearch as HSModel
from yitu.models.user import User
from yitu.services.douban import Douban
from yitu.services.gzhu.library_search import NcuSearch
from yitu.utils import get_request_params
class HotBook(Resource):
decorators = []
def post(self):
args = get_request_params([
("page", int, True, "json"),
("uid", int, True, "json"),
("timestamp", float, True, "json"),
("token", str, True, "json")
])
page = args["page"]
uid = args["uid"]
timestamp = args["timestamp"]
token = args["token"]
user = User.verify_token(token)
if user is None or user.id_ != uid:
return {
"data": [],
"status": 2,
"msg": "认证失败"
}
try:
pagination = Book.query.filter_by(is_hot=True) \
.order_by(Book.hot_id.desc()) \
.paginate(page, per_page=20, error_out=False).items
return {
"status": 0,
"msg": "搜索成功",
"data": [{
"book_last_number": book.book_last_number,
"book_cover": book.book_cover,
"book_id": book.book_id,
"book_author": json.loads(book.book_author),
"book_title": book.book_title,
"book_db_id": book.book_db_id,
"book_publish": book.book_publish,
"book_rate": book.book_rate
}
for book in pagination]
}
except Exception as e:
return {
"data": [],
"status": 2,
"msg": "数据库溜了"
}
class SearchBook(Resource):
decorators = []
def post(self):
args = get_request_params([
("timestamp", float, True, "json"),
("token", str, True, "json"),
("content", str, True, "json"),
("uid", int, True, "json"),
("type", int, True, "json"),
("page", int, False, "json")
])
timestamp = args["timestamp"]
token = args["token"]
content = args["content"]
uid = args["uid"]
type = args["type"]
user = User.verify_token(token)
if user is None or user.id_ != uid:
return {
"data": [],
"status": 1,
"msg": "认证失败"
}
try:
clear_content = content
if type == 0:
books_of_db = Book.query.filter(Book.book_title.like('%' + clear_content + '%')).paginate(
page=args["page"], per_page=20, error_out=False).items
elif type == 1:
books_of_db = Book.query.filter(Book.book_author.like('%' + clear_content + '%')).paginate(
page=args["page"], per_page=20, error_out=False).items
else:
books_of_db = Book.query.filter(Book.book_publish.like('%' + clear_content + '%')).paginate(
page=args["page"], per_page=20, error_out=False).items
except Exception as e:
return {
"data": [],
"status": 2,
"msg": "数据库溜了"
}
if books_of_db:
return {
"status": 0,
"msg": "搜索成功",
"data": [{
"book_cover": book.book_cover,
"book_id": book.book_id,
"book_rate": book.book_rate,
"book_title": book.book_title,
"book_author": json.loads(book.book_author),
"book_last_number": book.book_last_number,
"book_db_id": book.book_db_id,
"book_publish": book.book_publish
} for book in books_of_db]
}
else:
ncu_search = NcuSearch()
douban = Douban()
data = []
try:
for book_info in ncu_search.get(content, args["page"]):
if book_info["book_key"]:
b = douban.search_by_isbn(book_info["book_key"])
if not b:
continue
book_info.update(b)
b = Book.query.filter_by(book_key=book_info["book_key"]).first()
if b:
continue
new_book = Book(book_author=book_info["book_author"])
new_book.book_cover = book_info["book_cover"]
new_book.book_rate = book_info["book_rate"]
new_book.book_content = book_info["book_content"]
new_book.book_publish = book_info["book_publish"]
new_book.book_last_number = len(
list(filter(lambda x: not x["is_borrowed"], book_info["data"])))
new_book.book_key = book_info["book_key"]
new_book.book_db_id = book_info["book_db_id"]
new_book.book_title = book_info["book_title"]
new_book.detail_data = json.dumps(book_info["data"])
db.session.add(new_book)
db.session.commit()
mydict = {
"book_cover": book_info["book_cover"],
"book_id": new_book.book_id,
"book_rate": book_info["book_rate"],
"book_title": book_info["book_title"],
"book_author": json.loads(book_info["book_author"]),
"book_last_number": new_book.book_last_number,
"book_db_id": book_info["book_db_id"],
"book_publish": book_info["book_publish"]
}
data.append(mydict)
else:
b = douban.search_by_isbn(book_info["book_title"])
if not b:
continue
book_info.update(b)
b = Book.query.filter_by(book_db_id=book_info["book_db_id"]).first()
if b:
continue
new_book = Book(book_author=book_info["book_author"])
new_book.book_cover = book_info["book_cover"]
new_book.book_rate = book_info["book_rate"]
new_book.book_content = book_info["book_content"]
new_book.book_publish = book_info["book_publish"]
new_book.book_last_number = len(
list(filter(lambda x: not x["is_borrowed"], book_info["data"])))
new_book.book_key = book_info["book_key"]
new_book.book_db_id = book_info["book_db_id"]
new_book.book_title = book_info["book_title"]
new_book.detail_data = json.dumps(book_info["data"])
db.session.add(new_book)
db.session.commit()
mydict = {
"book_cover": book_info["book_cover"],
"book_id": new_book.book_id,
"book_rate": book_info["book_rate"],
"book_title": book_info["book_title"],
"book_author": json.loads(book_info["book_author"]),
"book_last_number": new_book.book_last_number,
"book_db_id": book_info["book_db_id"],
"book_publish": book_info["book_publish"]
}
data.append(mydict)
return {
"status": 0,
"msg": "搜索成功",
"data": data
}
except Exception as e:
print(e)
return {
"data": [],
"status": 3,
"msg": "服务器溜了"
}
class ShowDetail(Resource):
decorators = []
def post(self):
args = get_request_params([
("timestamp", float, True, "json"),
("book_db_id", int, True, "json"),
("token", str, True, "json"),
("book_id", int, True, "json"),
("uid", int, True, "json"),
])
timestamp = args["timestamp"]
book_db_id = args["book_db_id"]
token = args["token"]
book_id = args["book_id"]
uid = args["uid"]
user = User.verify_token(token)
if user is None or user.id_ != uid:
return {
"data": [],
"status": 2,
"msg": "认证失败"
}
try:
the_book = Book.query.filter_by(book_id=book_id).first()
if not the_book:
return {
"status": 0,
"message": "搜索成功",
"data": None
}
the_detail_data = json.loads(the_book.detail_data)
return {
"status": 0,
"msg": "搜索成功",
"data": {
"book_rate": the_book.book_rate,
"book_content": the_book.book_content,
"book_publish": the_book.book_publish,
"book_last_number": the_book.book_last_number,
"book_key": the_book.book_key,
"book_db_id": the_book.book_db_id,
"book_title": the_book.book_title,
"detail_data": the_detail_data,
"book_author": json.loads(the_book.book_author),
"book_place": None if len(the_detail_data) == 0 else the_detail_data[0]["detail_place"],
"book_id": the_book.book_id,
"book_cover": the_book.book_cover,
"is_subscribe": 1 if uid in the_book.subscribers else 0
}
}
except Exception as e:
return {
"data": [],
"status": 2,
"msg": "服务器溜了"
}
class Subscribe_(Resource):
def post(self):
args = get_request_params([
("timestamp", float, True, "json"),
("token", str, True, "json"),
("book_id", int, True, "json"),
("uid", int, True, "json")
])
timestamp = args["timestamp"]
token = args["token"]
book_id = args["book_id"]
uid = args["uid"]
def _push_msg(message, device_id):
app_key = 'app_key'
master_secret = 'master_key'
_jpush = jpush.JPush(app_key, master_secret)
push = _jpush.create_push()
# push.audience = jpush.audience([{"registration_id":device_id}])
push.audience = {'registration_id': [device_id]}
# push.audience = device_id
android_msg = jpush.android(
message,
None,
None,
{
"msg": message, # 强行套用app中notification的相关格式
"status": 0
}
)
ios_msg = jpush.ios(
message,
None,
None,
{
"msg": message, # 强行套用app中notification的相关格式
"status": 0
}
)
push.notification = jpush.notification("hello jpush", ios_msg, android_msg, None)
# push.options = {"time_to_live": 86400, "sendno": 12345, "apns_production":True}
push.options = {"time_to_live": 86400, "apns_production": True}
push.platform = jpush.platform("all")
push.send()
the_book = Book.query.filter_by(book_id=book_id).first()
the_detail_data = json.loads(the_book.detail_data)
flag = 0
for a_book in the_detail_data:
if a_book["is_borrowed"] == 1:
flag = 1
if flag == 1:
_push_msg("有书了", uid)
class HotSearch(Resource):
def post(self):
hs = HSModel.query.all()
return {
"status": 0,
"msg": "获取成功",
"data": [k.name for k in hs]
}
```
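A hedged sketch of the JSON body the `HotBook` resource above expects, mirroring its `get_request_params` declaration; the endpoint URL and token value are placeholders, since the route registration is not part of this file.
```python
import time

import requests

payload = {
    "page": 1,
    "uid": 42,
    "timestamp": time.time(),
    "token": "<token issued at login>",
}
resp = requests.post("http://example.com/api/books/hot", json=payload)  # placeholder URL
print(resp.json()["msg"])
```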
#### File: services/gzhu/book_details.py
```python
import requests
from bs4 import BeautifulSoup
import re
class BookDetails(object):
def __init__(self):
self.url = "http://lib.gzhu.edu.cn:8080/bookle/search2/detail/{0}?index=default&source=biblios"
pass
def get_details(self, uid):
url = self.url.format(uid)
data = requests.get(url)
data.encoding = "utf-8"
soup = BeautifulSoup(data.text, "lxml")
isbn_pattern = re.compile('douban.com/isbn/(.*?)/', re.S)
try:
isbn = re.findall(isbn_pattern, data.text)
book_key = isbn[0] if isbn else None
book_key = book_key.replace("-", "")
except Exception as e:
book_key = None
info = soup.select_one("table.book_holding")
info_tr = [b.find_all("td") for b in info.find_all("tr")[1:]]
detail_of_books = [
dict(zip(("detail_key", "detail_place", "is_borrowed"),
[a.text.strip().replace('\r\n \r\n [架位]','') for a in [b[0]] + [b[4]] + [b[1]]]
)
)
for b in info_tr]
id_ = 1
for the_book in detail_of_books:
the_book["detail_id"] = id_
id_ = id_ + 1
if the_book["is_borrowed"][:2] == '借出':
the_book["is_borrowed"] = 1
else:
the_book["is_borrowed"] = 0
return {
"data": detail_of_books,
"book_key": book_key,
}
```
#### File: services/gzhu/hotbook.py
```python
import json
import requests
from bs4 import BeautifulSoup
from yitu import db
from yitu.models.book import HotBook, Book
from yitu.services.douban import Douban
from yitu.services.gzhu.book_details import BookDetails
class HotBooks(object):
def __init__(self):
self.url = "http://192.168.127.12/top/top_lend.php?cls_no={0}"
pass
def get(self, category="ALL"):
"""
获取热门排行
:parameter category 分类
:return:
"""
db.session.execute("DELETE FROM hot_books WHERE is_hot=1")
url = self.url.format(category)
data = requests.get(url)
data.encoding = "utf-8"
soup = BeautifulSoup(data.text, "lxml")
tbody = soup.find("table")
books_tr = tbody.find_all("tr")[1:]
books = [b.find_all("td") for b in books_tr]
books_info = [{
"hot_id": int(b[0].text),
"book_title": b[1].text,
"book_publish": b[3].text,
"book_last_number": int(b[5].text) - int(b[6].text),
"uid": b[1].find("a").attrs["href"][-10:]
}
for b in books]
douban = Douban()
book_details = BookDetails()
for book_info in books_info:
book_info.update(book_details.get_details(book_info["uid"]))
if book_info["book_key"]:
b = douban.search_by_isbn(book_info["book_key"])
else:
b = douban.search_by_else(book_info["book_title"])
if not b:
continue
book_info.update(b)
b = Book.query.filter_by(book_db_id=book_info["book_db_id"]).first()
if not b:
book = Book(book_author=book_info["book_author"])
book.book_cover = book_info["book_cover"]
book.book_rate = book_info["book_rate"]
book.book_content = book_info["book_content"]
book.book_publish = book_info["book_publish"]
book.book_last_number = len(book_info["data"])
book.book_key = book_info["book_key"]
book.book_db_id = book_info["book_db_id"]
book.book_title = book_info["book_title"]
book.detail_data = json.dumps(book_info["data"])
book.hot_id = book_info["hot_id"]
book.book_last_number = book_info["book_last_number"]
book.is_hot = True
db.session.add(book)
db.session.commit()
def task():
h = HotBooks()
h.get()
print("task run")
``` |
{
"source": "1ierro1ast/flaskProjectCreator",
"score": 3
} |
#### File: 1ierro1ast/flaskProjectCreator/siteCreator.py
```python
import sys
import os
from llib import install
prefx = ""
def openFile(filename):
with open(filename,"r",encoding="UTF-8") as f:
return f.read()
def saveToFile(filename,data,mode="w"):
with open(filename,mode,encoding="UTF-8") as f:
f.write(data)
return True
def createTree(root,dirlist=["templates","static","src"],staticdirlist=["images","scripts"]):
os.mkdir(prefx+root)
for i in dirlist:
os.mkdir(prefx+root+"/"+i)
if i == "static":
for j in staticdirlist:
os.mkdir(prefx+root+"/"+i+"/"+j)
return True
def createIndexs(root, pathToCSS="{{ url_for('static', filename='style.css') }}", pathToJS="{{ url_for('static', filename='scripts/main.js') }}"):
indexPage = openFile("pattern.html")
indexPage = indexPage.replace("[title]",root)
indexPage = indexPage.replace("[pathToCSS]",pathToCSS)
indexPage = indexPage.replace("[pathToJS]",pathToJS)
saveToFile(prefx+root+"/templates/index.html", indexPage)
css = "/*place style here*/"
js = "//place javascript here"
app = openFile("pattern.py")
saveToFile(prefx+root+"/static/style.css", css)
saveToFile(prefx+root+"/static/scripts/main.js", js)
saveToFile(prefx+root+"/app.py", app)
return True
def main():
try:
if sys.argv[2] == "-f":
print("====|Flask install enable|====")
install("flask")
except IndexError:
print("====|Flask install disable|====")
try:
root=str(sys.argv[1])
except IndexError:
print("====|Use \"python siteCreate.py [Name] [-f]\"|====")
try:
createTree(root)
createIndexs(root)
print("====|Project created!|====")
except FileExistsError:
print("====|Project \""+root+"\" is exists!|====")
except UnboundLocalError:
pass
if __name__ == "__main__":
sys.exit(main())
``` |
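A short usage sketch for the helpers above; it assumes `pattern.html`, `pattern.py` and the `llib` module sit next to the script, as the imports and `openFile` calls require. The script is normally run as `python siteCreator.py <Name> [-f]`.
```python
from siteCreator import createIndexs, createTree

# Builds mysite/ with templates/, static/images/, static/scripts/ and src/,
# then writes templates/index.html, static/style.css, static/scripts/main.js and app.py.
createTree("mysite")
createIndexs("mysite")
```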
{
"source": "1illeke/myownrank-cogs",
"score": 2
} |
#### File: myownrank-cogs/ticketer/__init__.py
```python
from .ticketer import Ticketer
async def setup(bot):
cog = Ticketer()
await cog.register_casetypes()
bot.add_cog(cog)
```
#### File: myownrank-cogs/ticketer/ticketer.py
```python
import discord
from typing import Optional
from datetime import datetime
from redbot.core import commands, checks, Config, modlog
class Ticketer(commands.Cog):
"""Ticketer"""
def __init__(self):
self.config = Config.get_conf(self, 200730042020, force_registration=True)
default_guild = {
"channel": None,
"use_counter": False,
"closed_category": None,
"open_category": None,
"current_ticket": 0,
"role": None,
"message": "Your ticket has been created. You can add information by typing in this channel. \n\nA member of the ticket-handling-team will be with you as soon as they can.",
"active": [],
"modlog": True,
"closed": [],
}
self.config.register_guild(**default_guild)
@staticmethod
async def register_casetypes():
new_types = [
{
"name": "ticket_created",
"default_setting": True,
"image": "\N{BALLOT BOX WITH BALLOT}\N{VARIATION SELECTOR-16}",
"case_str": "Ticket created",
}
]
await modlog.register_casetypes(new_types)
@commands.group()
@checks.admin()
async def ticketer(self, ctx):
"""All ticketer settings."""
pass
@ticketer.command()
async def channel(self, ctx, channel: discord.TextChannel):
"""Set the ticket-management channel."""
await self.config.guild(ctx.guild).channel.set(channel.id)
await ctx.send(f"Channel has been set to {channel.mention}.")
@ticketer.command()
async def role(self, ctx, role: discord.Role):
"""Set the role for ticket managers."""
await self.config.guild(ctx.guild).role.set(role.id)
await ctx.send(f"Ticket manager role has been set to {role.mention}.")
@ticketer.group()
async def category(self, ctx):
"""Set the categories for open and closed tickets."""
@category.group()
async def open(self, ctx, category: discord.CategoryChannel):
"""Set the category for open tickets."""
await self.config.guild(ctx.guild).open_category.set(category.id)
await ctx.send(f"Category for open tickets has been set to {category.mention}")
@category.group()
async def closed(self, ctx, category: discord.CategoryChannel):
"""Set the category for open tickets."""
await self.config.guild(ctx.guild).closed_category.set(category.id)
await ctx.send(f"Category for closed tickets has been set to {category.mention}")
@ticketer.command()
async def message(self, ctx, *, message: str):
"""Set the message that is shown at the start of each ticket channel."""
await self.config.guild(ctx.guild).message.set(message)
await ctx.send(f"The message has been set to ``{message}``.")
@ticketer.command()
async def counter(self, ctx, true_or_false: bool):
"""Toggle if the ticket channels should be named using a user's name and ID or counting upwards starting at 0."""
await self.config.guild(ctx.guild).use_counter.set(true_or_false)
await ctx.send(
"The counter has been {}.".format("enabled" if true_or_false else "disabled")
)
@ticketer.command()
async def modlog(self, ctx, true_or_false: bool):
"""Decide if ticketer should log to modlog."""
await self.config.guild(ctx.guild).modlog.set(true_or_false)
await ctx.send(
"Logging to modlog has been {}.".format("enabled" if true_or_false else "disabled")
)
@ticketer.command()
async def quicksetup(self, ctx):
settings = await self.config.guild(ctx.guild).all()
if not settings["role"]:
role = await ctx.guild.create_role(
name="Ticketmanagers", hoist=True, mentionable=False, reason="Ticketer quicksetup"
)
await self.config.guild(ctx.guild).role.set(role.id)
await ctx.send("Ticket-manager role created.")
if not settings["open_category"]:
category = await ctx.guild.create_category(
name="Open-tickets", reason="Ticketer quicksetup"
)
await self.config.guild(ctx.guild).open_category.set(category.id)
await ctx.send("Category for open tickets created.")
if not settings["closed_category"]:
category = await ctx.guild.create_category(
name="Closed-tickets", reason="Ticketer quicksetup"
)
await self.config.guild(ctx.guild).closed_category.set(category.id)
await ctx.send("Category for closed tickets created.")
settings = await self.config.guild(ctx.guild).all()
if not settings["channel"]:
await ctx.send("Config queried for channel setup.")
overwrite = {
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.guild.get_role(settings["role"]): discord.PermissionOverwrite(
read_messages=True,
send_messages=True,
embed_links=True,
attach_files=True,
manage_messages=True,
),
}
channel = await ctx.guild.create_text_channel(
"ticket-management",
overwrites=overwrite,
category=ctx.guild.get_channel(settings["open_category"]),
topic="Ticket management channel.",
reason="Ticketer quicksetup",
)
await self.config.guild(ctx.guild).channel.set(channel.id)
await ctx.send("Channel for ticket management created.")
await ctx.send("Checking settings...")
if await self._check_settings(ctx):
await ctx.send("Quicksetup completed.")
else:
await ctx.send("Something went wrong...")
@ticketer.command()
async def purge(self, ctx, are_you_sure: Optional[bool]):
if are_you_sure:
async with self.config.guild(ctx.guild).closed() as closed:
                for channel in list(closed):  # iterate over a copy so entries can be removed safely
try:
await ctx.guild.get_channel(channel).delete(reason="Ticket purge")
closed.remove(channel)
except discord.Forbidden:
await ctx.send(
f"I could not delete channel ID {channel} because I don't have the required permissions."
)
except discord.NotFound:
closed.remove(channel)
except discord.HTTPException:
await ctx.send("Something went wrong. Aborting.")
return
else:
await ctx.send(
f"This action will permanently delete all closed ticket channels.\nThis action is irreversible.\nConfirm with ``{ctx.clean_prefix}ticketer purge true``"
)
@commands.group()
async def ticket(self, ctx):
"""Manage a ticket."""
pass
@ticket.command(aliases=["open"])
async def create(
self, ctx, *, reason: Optional[str] = "No reason provided.",
):
"""Create a ticket."""
if await self._check_settings(ctx):
settings = await self.config.guild(ctx.guild).all()
if settings["use_counter"]:
name = f"ticket-{settings['current_ticket']}"
await self.config.guild(ctx.guild).current_ticket.set(
settings["current_ticket"] + 1
)
else:
name = f"{ctx.author.name}-{ctx.author.id}"
found = False
for channel in ctx.guild.channels:
if channel.name == name.lower():
found = True
if not found:
if settings["modlog"]:
await modlog.create_case(
ctx.bot,
ctx.guild,
ctx.message.created_at,
action_type="ticket_created",
user=ctx.author,
moderator=ctx.author,
reason=reason,
)
overwrite = {
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.author: discord.PermissionOverwrite(
read_messages=True,
send_messages=True,
embed_links=True,
attach_files=True,
),
ctx.guild.get_role(settings["role"]): discord.PermissionOverwrite(
read_messages=True,
send_messages=True,
embed_links=True,
attach_files=True,
manage_messages=True,
),
}
ticketchannel = await ctx.guild.create_text_channel(
name,
overwrites=overwrite,
category=ctx.guild.get_channel(settings["open_category"]),
topic=reason,
)
await ticketchannel.send(settings["message"])
embed = discord.Embed(
title=name, description=reason, timestamp=datetime.utcnow(),
).set_footer(text="Last updated at:")
message = await ctx.guild.get_channel(settings["channel"]).send(embed=embed)
async with self.config.guild(ctx.guild).active() as active:
active.append((ticketchannel.id, message.id))
else:
await ctx.send("You already have an open ticket.")
else:
await ctx.send("Please finish the setup process before creating a ticket.")
@ticket.command()
async def close(self, ctx):
"""Close a ticket."""
settings = await self.config.guild(ctx.guild).all()
active = settings["active"]
success = False
for ticket in active:
if ctx.channel.id in ticket:
new_embed = (
await ctx.guild.get_channel(settings["channel"]).fetch_message(ticket[1])
).embeds[0]
new_embed.add_field(
                    name=datetime.utcnow().strftime("%H:%M UTC"),
value=f"Ticket closed by {ctx.author.name}#{ctx.author.discriminator}",
)
new_embed.timestamp = datetime.utcnow()
await (
await ctx.guild.get_channel(settings["channel"]).fetch_message(ticket[1])
).edit(
embed=new_embed, delete_after=10,
)
await ctx.send(embed=new_embed)
await ctx.send(
"This ticket can no longer be edited using ticketer.", delete_after=30
)
await ctx.channel.edit(
category=ctx.guild.get_channel(settings["closed_category"]),
name=f"{ctx.channel.name}-c-{datetime.utcnow().strftime('%B-%d-%Y-%H-%m')}",
overwrites={
ctx.guild.default_role: discord.PermissionOverwrite(read_messages=False),
ctx.guild.get_role(settings["role"]): discord.PermissionOverwrite(
read_messages=True,
send_messages=True,
embed_links=True,
attach_files=True,
manage_messages=True,
),
},
)
await ctx.send("Ticket closed.")
active.remove(ticket)
async with self.config.guild(ctx.guild).closed() as closed:
closed.append(ticket[0])
success = True
if not success:
await ctx.send("This is not a ticket channel.")
await self.config.guild(ctx.guild).active.set(active)
@ticket.command()
@checks.mod()
async def update(self, ctx, ticket: Optional[discord.TextChannel] = None, *, update: str):
"""Update a ticket. This is visible to all participants of the ticket."""
if ticket is None:
channel = ctx.channel
else:
channel = ticket
settings = await self.config.guild(ctx.guild).all()
active = settings["active"]
for ticket in active:
if channel.id in ticket:
await channel.edit(
topic=f'{channel.topic}\n\n{ctx.author.name}#{ctx.author.discriminator}:"{update}"'
)
await ctx.send("Ticket updated.", delete_after=10)
else:
ctx.send(f"{channel.mention} is not a ticket channel.")
@ticket.command()
@checks.mod()
async def note(self, ctx, ticket: discord.TextChannel, *, note: str):
"""Add a staff-only note to a ticket."""
channel = ticket
for ticket in await self.config.guild(ctx.guild).active():
if channel.id in ticket:
message = await ctx.guild.get_channel(
await self.config.guild(ctx.guild).channel()
).fetch_message(ticket[1])
new_embed = message.embeds[0]
new_embed.add_field(
name=f"{ctx.author.name}#{ctx.author.discriminator}", value=note
)
new_embed.timestamp = datetime.utcnow()
await message.edit(embed=new_embed)
await ctx.send("Note added.", delete_after=10)
else:
await ctx.send("This is not a ticket channel.")
async def _check_settings(self, ctx: commands.Context) -> bool:
settings = await self.config.guild(ctx.guild).all()
count = 0
if settings["channel"]:
count += 1
else:
await ctx.send("Management channel not set up yet.")
if settings["closed_category"]:
count += 1
else:
await ctx.send("Category for closed tickets not set up yet.")
if settings["open_category"]:
count += 1
else:
await ctx.send("Category for open tickets not set up yet.")
if settings["role"]:
count += 1
else:
await ctx.send("Ticket manager role not set up yet.")
if count == 4:
return True
else:
return False
```
#### File: myownrank-cogs/welcome/__init__.py
```python
from redbot.core.bot import Red
from .welcome import Welcome
def setup(bot: Red):
bot.add_cog(Welcome())
``` |
{
"source": "1illeke/novi2",
"score": 2
} |
#### File: novi2/datautils/datautils.py
```python
import string
import unicodedata
from asyncio import TimeoutError as AsyncTimeoutError
from textwrap import shorten
from types import SimpleNamespace
from typing import Optional, Union
import discord
import tabulate
from redbot.core import checks, commands
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils import AsyncIter
from redbot.core.utils import chat_formatting as chat
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from redbot.core.utils.predicates import ReactionPredicate
def bool_emojify(bool_var: bool) -> str:
return "✅" if bool_var else "❌"
T_ = Translator("DataUtils", __file__)
_ = lambda s: s
TWEMOJI_URL = "https://twemoji.maxcdn.com/v/latest/72x72"
APP_ICON_URL = "https://cdn.discordapp.com/app-icons/{app_id}/{icon_hash}.png"
NON_ESCAPABLE_CHARACTERS = string.ascii_letters + string.digits
GUILD_FEATURES = {
"VIP_REGIONS": _("384kbps voice bitrate"),
"VANITY_URL": _("Vanity invite URL"),
"INVITE_SPLASH": _("Invite splash{splash}"),
"VERIFIED": _("Verified"),
"PARTNERED": _("Discord Partner"),
"MORE_EMOJI": _("Extended emoji limit"), # Non-boosted?
"DISCOVERABLE": _("Shows in Server Discovery{discovery}"),
# "FEATURABLE": _('Can be in "Featured" section of Server Discovery'),
"COMMERCE": _("Store channels"),
"NEWS": _("News channels"),
"BANNER": _("Banner{banner}"),
"ANIMATED_ICON": _("Animated icon"),
"WELCOME_SCREEN_ENABLED": _("Welcome screen"),
"PUBLIC_DISABLED": _("Cannot be public"),
"ENABLED_DISCOVERABLE_BEFORE": _("Was in Server Discovery"),
"COMMUNITY": _("Community server"),
# Docs from https://github.com/vDelite/DiscordLists:
"PREVIEW_ENABLED": _('Preview enabled ("Lurkable")'),
"MEMBER_VERIFICATION_GATE_ENABLED": _("Member verification gate enabled"),
"MEMBER_LIST_DISABLED": _("Member list disabled"),
    # It is unclear what this feature actually means, and Discord provides little
    # documentation for it. If it shows up on your server while using this cog,
    # its purpose and origin are unknown.
"FORCE_RELAY": _(
"Shards connections to the guild to different nodes that relay information between each other."
),
}
ACTIVITY_TYPES = {
discord.ActivityType.playing: _("Playing"),
discord.ActivityType.watching: _("Watching"),
discord.ActivityType.listening: _("Listening to"),
discord.ActivityType.competing: _("Competing in"),
}
CHANNEL_TYPE_EMOJIS = {
discord.ChannelType.text: "\N{SPEECH BALLOON}",
discord.ChannelType.voice: "\N{SPEAKER}",
discord.ChannelType.category: "\N{BOOKMARK TABS}",
discord.ChannelType.news: "\N{NEWSPAPER}",
discord.ChannelType.store: "\N{SHOPPING TROLLEY}",
discord.ChannelType.private: "\N{BUST IN SILHOUETTE}",
discord.ChannelType.group: "\N{BUSTS IN SILHOUETTE}",
}
_ = T_
async def get_twemoji(emoji: str):
emoji_unicode = []
for char in emoji:
char = hex(ord(char))[2:]
emoji_unicode.append(char)
if "200d" not in emoji_unicode:
emoji_unicode = list(filter(lambda c: c != "fe0f", emoji_unicode))
emoji_unicode = "-".join(emoji_unicode)
return f"{TWEMOJI_URL}/{emoji_unicode}.png"
async def find_app_by_name(where: list, name: str):
async for item in AsyncIter(where):
for k, v in item.items():
if v == name:
return item
@cog_i18n(_)
class DataUtils(commands.Cog):
"""Commands for getting information about users or servers."""
__version__ = "2.4.18"
# noinspection PyMissingConstructor
def __init__(self, bot):
self.bot = bot
self.TIME_FORMAT = _("%d.%m.%Y %H:%M:%S %Z")
async def red_delete_data_for_user(self, **kwargs):
return
@commands.command(aliases=["info", "i"])
@commands.guild_only()
@checks.bot_has_permissions(embed_links=True)
async def uinfo(self, ctx, *, member: discord.Member = None):
"""Information on a user"""
if member is None:
member = ctx.message.author
em = discord.Embed(
title=chat.escape(str(member), formatting=True),
color=member.color.value and member.color or discord.Embed.Empty,
)
if member.nick:
em.add_field(name=_("Nickname"), value=member.nick)
else:
em.add_field(name=_("Name"), value=member.name)
em.add_field(name=_("Joined server"), value=member.joined_at.strftime(self.TIME_FORMAT))
if member.premium_since:
em.add_field(
name=_("Boosted server"),
value=member.premium_since.strftime(self.TIME_FORMAT)
            )
await ctx.send(embed=em)
``` |
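The `get_twemoji` helper above builds a Twemoji CDN URL by hex-encoding an emoji's codepoints and dropping the `fe0f` variation selector when no zero-width joiner (`200d`) is present. A minimal standalone sketch of that mapping (the example emoji and expected URLs are illustrative, not taken from the cog's tests):
```python
TWEMOJI_URL = "https://twemoji.maxcdn.com/v/latest/72x72"

def twemoji_url(emoji: str) -> str:
    # Hex-encode each codepoint, e.g. U+1F44D -> "1f44d".
    codepoints = [format(ord(ch), "x") for ch in emoji]
    # Drop the variation selector unless a zero-width joiner sequence needs it.
    if "200d" not in codepoints:
        codepoints = [cp for cp in codepoints if cp != "fe0f"]
    return f"{TWEMOJI_URL}/{'-'.join(codepoints)}.png"

print(twemoji_url("\N{THUMBS UP SIGN}"))            # .../1f44d.png
print(twemoji_url("\N{HEAVY BLACK HEART}\ufe0f"))   # .../2764.png
```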
{
"source": "1IllI1/BBS_MIGRATE",
"score": 2
} |
#### File: app/home/views.py
```python
from . import home
from flask import render_template, redirect, url_for, flash, session, request,current_app
from app.home.forms import RegistForm, LoginForm, UserdetailForm, PwdForm, CommentForm, PostForm
from app.models import User, UserLoginLog, Comment, Post,Col
from werkzeug.security import generate_password_hash
from werkzeug.utils import secure_filename
import uuid
from app import db
from app.home.email import send_mail
from functools import wraps
import time
import os
# Decorator that checks whether the user is logged in
def user_login_req(func):
@wraps(func)
def decorated_function(*args, **kwargs):
        # Ask the user to log in when there is no session
if "user" not in session:
return redirect(url_for("home.user_login", next=request.url))
return func(*args, **kwargs)
return decorated_function
# HTML test route
@home.route('/usefortest/')
def ust():
return render_template('home/USERFORTEST.html')
# Home page route
@home.route('/')
def index():
# posts = Post.query.all()
current_user_id = 0
current_user_name =""
if "user" in session:
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
current_user_id = user.id
page_index = request.args.get('page', 1, type=int)
query = Post.query.join(User).filter(User.id == Post.user_id).order_by(Post.addtime.desc())
pagination = query.paginate(page_index, per_page=10, error_out=False)
posts = pagination.items
return render_template('home/index.html', posts=posts, pagination=pagination,current_user_name=current_user_name,current_user_id=current_user_id)
# Delete one of the current user's own posts from the home page
@home.route("/index/del/")
@user_login_req
def index_del():
    # Get the id of the currently logged-in user
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
current_user_id = user.id
index_id = request.args.get("id", '0')
post = Post.query.get_or_404(int(index_id))
if post.user_id != current_user_id:
flash("删除不合法")
return redirect(url_for("home.index"))
db.session.delete(post)
db.session.commit()
flash("删除成功")
return redirect(url_for("home.index"))
# Add a post to the user's favorites
@home.route("/index/col/")
@user_login_req
def index_col():
    # Get the id of the currently logged-in user
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
current_user_id = user.id
index_id = request.args.get("id", '0')
    col_check = Col.query.filter_by(post_id=index_id, user_id=current_user_id).count()
    if col_check == 0:
        col = Col(
            post_id=index_id,
            user_id=current_user_id
        )
        db.session.add(col)
        db.session.commit()
        flash("收藏成功", "ok")
    else:
        flash("收藏已存在", "err")
return redirect(url_for("home.index"))
# Add a comment to the user's favorites
@home.route("/play/col/")
@user_login_req
def play_col():
    # Get the id of the currently logged-in user
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
current_user_id = user.id
index_id = request.args.get("id", '0')
    col_check = Col.query.filter_by(comment_id=index_id, user_id=current_user_id).count()
    if col_check == 0:
        col = Col(
            comment_id=index_id,
            user_id=current_user_id
        )
        db.session.add(col)
        db.session.commit()
        flash("收藏成功", "ok")
    else:
        flash("收藏已存在", "err")
return redirect(url_for("home.index"))
#
# from io import BytesIO
# from . import verify_code
# @home.route('/code')
# def get_code():
# image, code = verify_code.get_verify_code()
# # Write the image as binary data
# buf = BytesIO()
# image.save(buf, 'jpeg')
# buf_str = buf.getvalue()
# # Return buf_str to the front end as the response and set the header fields
# response = verify_code.make_response(buf_str)
# response.headers['Content-Type'] = 'image/gif'
# # Store the verification code string in the session
# session['image'] = code
# return response
@home.route('/activate/<token>')
def activate(token):
    # Verify the token and extract the user id
if User.check_active_token(token):
flash("账户已经激活")
return redirect(url_for("home.user_login"))
else:
flash("激活失败")
return redirect(url_for("home.index"))
# Login route
@home.route("/login/", methods=["POST", "GET"])
def user_login():
form = LoginForm()
if form.validate_on_submit():
data = form.data
user = User.query.filter_by(name=data["name"]).first()
print("登录按钮被点击")
# if session.get('image').lower() != form.verify_code.data.lower():
# flash('验证码错误')
# return render_template('home/user_login.html', form=form)
print("用户激活状态"+str(user.activate))
if user.activate:
if not user.check_pwd(data["pwd"]):
flash("用户名或密码错误!")
return redirect(url_for("home.user_login"))
session["user"] = data["name"]
#session["user_id"] = user.id
userloginlog = UserLoginLog(
user_id=user.id,
ip=request.remote_addr,
addtime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
db.session.add(userloginlog)
db.session.commit()
return redirect(request.args.get('next') or url_for("home.index"))
else:
flash("用户尚未激活,请激活以后再登录")
return render_template('home/user_login.html', form=form)
# Logout route
@home.route("/logout/")
@user_login_req
def logout():
session.pop("user")
return redirect(url_for("home.user_login"))
# User registration
@home.route("/register/", methods=['GET', "POST"])
def register():
form = RegistForm()
if form.validate_on_submit():
data = form.data
user = User(
name=data["name"],
email=data["email"],
phone=data["phone"],
pwd=generate_password_hash(data["pwd"]),
uuid=uuid.uuid4().hex,
addtime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
print(user)
check = User.query.filter_by(name=data["name"]).count()
if check == 0:
db.session.add(user)
db.session.commit()
print("用户数据提交到数据库")
token = user.generate_active_token()
            # Send the account activation email
send_mail(user.email, '激活您的账户', 'email/activate', username=user.name, token=token)
            # Flash a message to inform the user
flash("注册成功,请点击邮件中的链接完成激活",'ok')
return redirect(url_for("home.user_login"))
flash("用户名已存在","err")
return render_template('home/register.html', form=form)
# Build a new name for an uploaded file
def change_filename(filename):
    fileinfo = os.path.splitext(filename)  # split the name into base and extension
    # note: time.strftime is used here rather than datetime.now()
filename = time.strftime("%Y%m%d%H%M%S") + "_" + fileinfo[-1] # 生成新文件名
return filename
# User profile page (user center)
@home.route("/user/", methods=["GET", "POST"])
@user_login_req
def user():
form = UserdetailForm()
user = User.query.filter_by(name=(session["user"])).first()
if user.face is not None:
form.face.validators = []
if request.method == "GET":
form.name.data = user.name
form.email.data = user.email
form.phone.data = user.phone
form.info.data = user.info
if form.validate_on_submit():
print('button pressed')
data = form.data
# if data["name"] != user.name and name_count == 1:
# flash("用户名已被占用")
# return redirect(url_for("home.user"))
if request.method == 'POST':
if request.files['imageup']:
file = request.files['imageup']
print("获取文件成功")
filename = secure_filename(str(hash(file.filename)))+str(user.id)+".jpg"
print("secure成功"+filename)
del_face = user.face
file.save(os.path.join(current_app.config['UP_DIR']+os.sep+"users",filename))
print("上传成功" + filename)
#os.remove(os.path.join(app.config['UP_DIR'] + os.sep+"users", del_face))
print("删除文件"+del_face+"成功")
user.face = filename
user.name=data["name"]
user.email=data["email"]
user.phone=data["phone"]
user.info=data["info"]
db.session.add(user)
db.session.commit()
flash("修改成功!")
return redirect(url_for("home.user"))
flash("失败")
return render_template('home/user.html', form=form, user=user)
@home.route("/pwd/", methods=["GET", "POST"])
@user_login_req
def pwd():
form = PwdForm()
if form.validate_on_submit():
data = form.data
user = User.query.filter_by(name=session["user"]).first()
        user.pwd = generate_password_hash(data["new_pwd"])
db.session.add(user)
db.session.commit()
flash("修改密码成功,请重新登录!", "ok")
return redirect(url_for("home.logout"))
return render_template('home/pwd.html', form=form)
# Comment list in the user center; posting comments happens in the play route
@home.route("/comments/")
@user_login_req
def comments():
user_name = session["user"]
user = User.query.filter_by(name=user_name).first()
page = request.args.get('page', 1, type=int)
# query = Comment.query.order_by(Comment.addtime.desc())
query = Comment.query.filter(Comment.user_id == user.id).order_by(Comment.addtime.desc())
pagination = query.paginate(page, per_page=10, error_out=False)
comments = pagination.items
return render_template('home/comments.html', user=user,user_name=user_name, comments=comments,pagination=pagination)
@home.route("/comments/del/")
@user_login_req
def comment_del():
comment_id = request.args.get("id", '')
comment = Comment.query.get_or_404(int(comment_id))
db.session.delete(comment)
db.session.commit()
flash("评论删除成功")
return redirect(url_for("home.comments"))
@home.route("/postrecords/")
@user_login_req
def postrecords():
user_name = session["user"]
user = User.query.filter_by(name=user_name).first()
user_id = user.id
user = User.query.filter_by(id=user_id).first()
page = request.args.get('page', 1, type=int)
# query = Comment.query.order_by(Comment.addtime.desc())
query = Post.query.filter(Post.user_id == user_id).order_by(Post.addtime.desc())
pagination = query.paginate(page, per_page=5, error_out=False)
posts = pagination.items
return render_template('home/post_records.html', user=user,user_name=user_name, posts=posts, pagination=pagination)
@home.route("/postrecords/del/")
@user_login_req
def post_del():
post_id = request.args.get("id", '')
post = Post.query.get_or_404(int(post_id))
    comments = Comment.query.filter_by(post_id=post_id).all()
    for comment in comments:
        db.session.delete(comment)
    db.session.delete(post)
    db.session.commit()
flash("主题帖删除成功")
return redirect(url_for("home.postrecords"))
@home.route("/loginlog/", methods=["POST", "GET"])
@user_login_req
def loginlog():
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
user_login_log = UserLoginLog.query.filter_by(
user_id=user.id
).order_by(
UserLoginLog.addtime.desc()
        # limit the query to the 15 most recent login log entries
).limit(15).all()
return render_template("home/loginlog.html", user_login_log=user_login_log)
@home.route("/col/del/")
@user_login_req
def col_del():
current_user_name = session["user"]
user= User.query.filter_by(name=current_user_name).first()
current_user_id = user.id
col_id = request.args.get("id", '')
col = Col.query.get_or_404(int(col_id))
if col.user_id != current_user_id:
flash("收藏删除不合法")
return redirect(url_for("home.col"))
db.session.delete(col)
db.session.commit()
flash("收藏删除成功")
return redirect(url_for("home.col"))
## Favorites list in the user center
@home.route("/col/")
@user_login_req
def col():
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
user_id = user.id
    # Get the current page number (name, default, type)
page = request.args.get('page', 1, type=int)
    # Query this user's favorites from the database
#query =Col.query.filter_by(user_id =user_id).order_by(Col.addtime.desc())
    query = Col.query.join(Post).join(User).filter(Col.user_id == user_id, Col.post_id == Post.id).order_by(Col.addtime.desc())
    # Paginate the favorites (page number, items per page, error flag)
pagination = query.paginate(page, per_page=5, error_out=False)
    # Items shown on the current page after pagination
cols = pagination.items
    # Render the favorites page
print(query)
return render_template('home/col.html',cols=cols,pagination=pagination)
@home.route("/index/")
def reindex():  # the /index/ route duplicates the home page
return redirect(url_for("home.index"))
@home.route('/animation/')
def animation():
data = {'sgd.jpg', 'sutstudent.jpg', 'sutsight01.jpg', 'sutsight02.jpg', 'hxxy.jpg'}
return render_template('home/animation.html', data=data)
@home.route('/search/')
def search():
current_user_id = 0
current_user_name = ""
if "user" in session:
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
current_user_id = user.id
    # Get the search term
# search=request.args.get("search",'',type=str)
search = request.args.get("search", "搜索结果为空")
# print("搜索的的内容"+search)
    # Get the current page number (name, default, type)
page = request.args.get('page', 1, type=int)
    # Query posts whose titles match the search term
query = Post.query.filter(Post.title.ilike('%' + search + '%')).order_by(Post.addtime.desc())
    # Count the matching posts
post_count = Post.query.filter(Post.title.ilike('%' + search + '%')).count()
    # Paginate the results (page number, items per page, error flag)
pagination = query.paginate(page, per_page=5, error_out=False)
    # Posts shown on the current page after pagination
comments = pagination.items
    # Render the search results page
return render_template("home/search.html", search=search, count=post_count, current_user_name=current_user_name,pagination=pagination, results=comments,current_user_id=current_user_id)
# Post detail page
@home.route('/play/', methods=["GET", "POST"])
def play():
    # Get the requested post_id from the query string
post_id = request.args.get("post_id", "")
    # Comment form
form = CommentForm()
    # Clear the form content
form.data['content'] = ""
    # Look up the post to display by its post_id
post = Post.query.filter(Post.id == post_id).first()
    # Look up the author in the User table via the post's user_id
author = User.query.filter(User.id == post.user_id).first()
    # Get the name of the currently logged-in user from the session
current_user_id = 0
current_user_name = '游客'
if "user" in session:
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
current_user_id = user.id
    # Only handle the comment form when the user is logged in
if "user" in session and form.validate_on_submit():
comment = Comment(
content=form.data["content"],
post_id=int(post_id),
user_id=current_user_id,
addtime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
db.session.add(comment)
db.session.commit()
flash("评论提交成功!")
    # Get the current page number (name, default, type)
page = request.args.get('page', 1, type=int)
    # Query the comments belonging to this post
query = Comment.query.join(User).filter(Comment.post_id == post_id).order_by(Comment.addtime.desc())
    # Count the comments on this post
comment_count = Comment.query.filter(Comment.post_id == post_id).count()
    # Paginate the comments (page number, items per page, error flag)
pagination = query.paginate(page, per_page=5, error_out=False)
    # Comments shown on the current page after pagination
comments = pagination.items
    # Render the post detail page
return render_template("home/play.html", post=post, form=form, comments=comments,
pagination=pagination, author=author,current_user_name=current_user_name, count=comment_count,current_user_id=current_user_id)
@home.route('/post/', methods=["GET", "POST"])
@user_login_req
def post():
form = PostForm()
current_user_name = session["user"]
user = User.query.filter_by(name=current_user_name).first()
current_user_id = user.id
if form.validate_on_submit():
data = form.data
post = Post(
title=data["title"],
content=data["content"],
user_id=current_user_id,
addtime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
)
db.session.add(post)
db.session.commit()
flash("发布主题帖成功")
return render_template("home/post_add.html", form=form,current_user_name=current_user_name)
# 404 error handler
@home.errorhandler(404)
def page_not_found(error):
return render_template("home/404.html"),404
``` |
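Most of the list views above follow the same pagination recipe: read the page number from the query string, build a query, paginate it, and hand `pagination.items` to the template. A condensed sketch of that pattern, assuming this module's existing imports (`home`, `Post`, `request`, `render_template`) and the positional `paginate()` signature of the older Flask-SQLAlchemy releases this code appears to target; the route name is illustrative only:
```python
@home.route('/example/')
def example_listing():
    page = request.args.get('page', 1, type=int)               # current page number
    query = Post.query.order_by(Post.addtime.desc())           # base query
    pagination = query.paginate(page, per_page=10, error_out=False)
    return render_template('home/index.html',
                           posts=pagination.items,             # rows on this page
                           pagination=pagination)
```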
{
"source": "1im1/winspec",
"score": 3
} |
#### File: winspec/scripts/lightfield_metadata_oliver.py
```python
import os
import csv
import re
import getpass
import logging
from xml.parsers.expat import ExpatError
import winspec
def get_files(search_root, suffix):
logging.info("Searching for {} files.".format(suffix))
parser = re.compile("{}$".format(suffix),
re.IGNORECASE)
for root, dirs, filenames in os.walk(search_root):
for filename in filenames:
if parser.search(filename):
name = os.path.join(root, filename)
logging.debug("Found {0}".format(name))
yield(name)
def match_spe_tiff_files(spe_files, tiff_files):
"""
For each spe file, find any associated tiff file by checking that they
share the same filename.
"""
logging.info("Matching spe and tiff files.")
spe = dict()
my_spe_files = list(spe_files)
my_tiff_files = list(tiff_files)
for spe_filename in my_spe_files:
spe[spe_filename] = list()
        spe_name = re.sub(r"\.spe$", "", os.path.split(spe_filename)[1],
                          flags=re.IGNORECASE)
for tiff_filename in my_tiff_files:
if re.search(spe_name, tiff_filename):
spe[spe_filename].append(tiff_filename)
my_tiff_files.remove(tiff_filename)
return(spe)
def write_metadata_database(spe, database_filename):
"""
For each spe file, write an entry to a csv file which includes the pertinent
metadata.
"""
logging.info("Writing database of metadata of {} files.".format(len(
spe.keys())))
keys = [("Exposure start", "exposure_start"),
("Exposure stop", "exposure_stop"),
("Exposure time", "exposure_time"),
("Gain", "gain"),
("AD rate", "ad_rate"),
("Frame rate", "frame_rate"),
("Readout Time", "readout_time"),
("Temperature set", "temperature_set"),
("Temperature read", "temperature_read"),
("Background file", "background_file"),
("Number of frames", "n_frames"),
("Frames per readout", "frames_per_readout")]
with open(database_filename, "wb") as stream_out:
writer = csv.writer(stream_out)
fields = ["Filename"] + list(map(lambda x: x[0], keys))
writer.writerow(fields)
for filename in sorted(spe.keys()):
try:
w = winspec.Lightfield(filename)
my_row = [filename]
for name, key in keys:
my_row.append(getattr(w, key)())
writer.writerow(my_row)
except ExpatError:
print("Failed to parse xml for {}".format(filename))
continue
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
if getpass.getuser() == "tsbischof":
spe_directory = "/home/tsbischof/src/winspec"
tiff_directory = spe_directory
report_filename = "/home/tsbischof/src/winspec/scripts/oliver.csv"
else:
spe_directory = r"E:\raw_data"
tiff_directory = spe_directory
report_filename = r"C:\Users\Oliver\Desktop\data_processing\thomas_script_results\oliver.csv"
spe_files = get_files(spe_directory, "spe")
tiff_files = get_files(spe_directory, "tiff")
spe_database = match_spe_tiff_files(spe_files, tiff_files)
write_metadata_database(spe_database, report_filename)
## write_metadata_to_tiff(spe_database)
```
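`match_spe_tiff_files` pairs each `.spe` file with any TIFF whose name contains the same base name. A simplified standalone sketch of that matching, using made-up paths and omitting the step that removes a TIFF from the pool once it has been claimed:
```python
import os
import re

spe_files = ["/data/run1/sample_a.spe", "/data/run1/sample_b.spe"]
tiff_files = ["/data/run1/sample_a_frame0.tiff", "/data/run1/other.tiff"]

matches = {}
for spe in spe_files:
    base = re.sub(r"\.spe$", "", os.path.basename(spe), flags=re.IGNORECASE)
    matches[spe] = [t for t in tiff_files if base in os.path.basename(t)]

print(matches)
# {'/data/run1/sample_a.spe': ['/data/run1/sample_a_frame0.tiff'],
#  '/data/run1/sample_b.spe': []}
```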
#### File: winspec/scripts/oliver_image_processing.py
```python
import re
import os
import Tkinter, tkFileDialog
import numpy
import libtiff
from winspec import Lightfield
def subtract_mean(filename, dst_dir=None, discard_frames=[0], print_every=10):
root, name = os.path.split(filename)
if dst_dir is None:
dst_dir = root
    name_base = re.sub(r"\.spe", "", name)
mean_filename = os.path.normpath(os.path.join(dst_dir,
"{}_mean_background.tif".format(name_base)))
max_filename = os.path.normpath(os.path.join(dst_dir,
"{}_max_mean_background.tif".format(name_base)))
lf = Lightfield(filename)
frames = list()
for frame_index, frame in enumerate(lf.frames()):
if frame_index % print_every == 0:
print("Frame {} of {}".format(frame_index, lf.n_frames()))
if frame_index in discard_frames:
continue
frames.append(numpy.array(list(frame.regions[0].data())))
frames = numpy.array(frames)
mean_frame = frames.sum(0)/frames.shape[0]
frames -= mean_frame[None,:,:]
print("Writing to {}".format(mean_filename))
mean_image = libtiff.TIFFimage(frames.astype("int16"))
mean_image.write_file(mean_filename, verbose=False)
print("Writing to {}".format(max_filename))
max_image = libtiff.TIFFimage(numpy.amax(frames.astype("int16"), axis=0))
max_image.write_file(max_filename, verbose=False)
if __name__ == "__main__":
root = Tkinter.Tk()
root.withdraw()
filenames = root.tk.splitlist(tkFileDialog.askopenfilenames(
title="Choose files to process",
filetypes=[("Winspec/Lightfield files", "*.spe")]))
## filenames = ["blargh.spe"]
dst_dir = tkFileDialog.askdirectory(title="Choose a destination directory")
for filename in filenames:
print("Processing {}".format(filename))
subtract_mean(filename, dst_dir=dst_dir, print_every=40)
```
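The core of `subtract_mean` is a mean-frame background subtraction done with NumPy broadcasting: average over the frame axis, subtract that 2-D mean from every frame, then take a per-pixel maximum projection. A tiny sketch of just that arithmetic on random data:
```python
import numpy as np

frames = np.random.randint(0, 100, size=(5, 4, 4)).astype("float64")  # (n_frames, h, w)
mean_frame = frames.sum(0) / frames.shape[0]                          # (h, w)
corrected = frames - mean_frame[None, :, :]                           # broadcast over frames
max_projection = np.amax(corrected, axis=0)                           # per-pixel maximum

print(corrected.shape, max_projection.shape)   # (5, 4, 4) (4, 4)
```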
#### File: winspec/winspec/cstruct.py
```python
import struct
import logging
import pprint
class CStruct(object):
def __init__(self, definition):
self.__definition = definition
def __str__(self):
result = list()
# Change this to give a lisp-like output:
# (name, value)
# and so forth
return(pprint.pformat(self.to_list()))
def from_stream(self, data):
stream_to_tuple(data, self.__definition, self)
def to_list(self):
result = list()
for name, form in self.__definition:
try:
value = getattr(self, name)
if isinstance(value, tuple):
for index, f in enumerate(value):
result.append(("{0}[{1}]".format(name, index),
f.to_list()))
elif isinstance(value, CStruct):
result.append((name, value.to_list()))
else:
result.append((name, value))
except AttributeError:
logging.debug("Attribute not found: {0}".format(name))
result.append((name, None))
return(result)
def definition(self):
return(self.__definition)
def add_tab(string, tabs=1):
result = str()
for line in string.split("\n"):
result += "\t{0}\n".format(line)
return(result)
def strip_null(string):
    return(string.rstrip("\x00").lstrip("\x00"))
def stream_to_tuple(data, structure_definition, target):
"""Given the structure of interest, populates the named tuple with the
appropriate data. Ideally, this could be done with tuple._make(struct.unpack())),
but strings do not seem to work properly in this case."""
for index, definition in enumerate(structure_definition):
logging.debug(definition)
name, form = definition
number, formtype = form
logging.debug("{0}: {1}".format(name, form))
if type(formtype) == type(str()):
# We have a string format, so no recursion
formstr = "{0}{1}".format(number, formtype)
logging.debug("Reading {0} ({1}) from stream at offset {2}.".format(
name, formstr, data.tell()))
size = struct.calcsize(formstr)
my_data = data.read(size)
value = struct.unpack_from(formstr, my_data)
logging.debug("Found: {0}".format(value))
# Now that we have the data, we need to consider whether it is an
# array or a single value. If a character array, create a string.
# If a numerical array, make a list. If a single value, make a
# single value
if form[-1] in "?bBhHiIlLqQfdP":
logging.debug("Numerical value.")
# Numerical value
if form[0] == 1:
# Single value
value = value[0]
else:
value = list(value)
else:
# Character value
# Kludge to get rid of "\x00". This should be possible using nicer
# methods.
logging.debug("Character value.")
value = strip_null(bytes("".encode()).join(value).decode())
logging.debug("{0}: {1}".format(name, value))
setattr(target, name, value)
else:
# Recursion!
logging.debug("Recursing!")
if number == 1:
setattr(target, name, CStruct(formtype))
stream_to_tuple(data, formtype, getattr(target, name))
else:
setattr(target, name, tuple([CStruct(formtype)
for i in range(number)]))
for i in range(number):
stream_to_tuple(data, formtype, getattr(target, name)[i])
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
blargh_t = [
("blargh", (20, "i")),
("cool", (10, "c"))]
nyargh_t = [
("nyargh", (2, "c")),
("fjord", (10, blargh_t))]
a = CStruct(nyargh_t)
with open("WINHEAD.TXT", "rb") as data:
a.from_stream(data)
print(a)
``` |
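`stream_to_tuple` walks a nested definition and, for each leaf field, builds a `struct` format string such as `"20i"` or `"10c"`, reads `struct.calcsize()` bytes, and unpacks them; character arrays are joined and null-stripped while numeric arrays become lists. A minimal sketch of that leaf-level handling with a hand-packed byte stream:
```python
import io
import struct

stream = io.BytesIO(struct.pack("3i", 1, 2, 3) + b"hi\x00\x00\x00")

fmt = "3i"                                   # (number, formtype) == (3, "i")
values = struct.unpack_from(fmt, stream.read(struct.calcsize(fmt)))
print(list(values))                          # [1, 2, 3]

fmt = "5c"                                   # character array -> string, nulls stripped
chars = struct.unpack_from(fmt, stream.read(struct.calcsize(fmt)))
print(b"".join(chars).rstrip(b"\x00").decode())   # hi
```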
{
"source": "1imartinez/Final_Exam001",
"score": 4
} |
#### File: 1imartinez/Final_Exam001/one.py
```python
def reverse(text):
return text[::-1]
def remove(text):
return text.replace(" ","")
def lower(text):
return text.lower()
def checkisPallendrome(text):
undercase = lower(text)
result = remove(undercase)
backwards = reverse(result)
if (result == backwards):
x = True
else:
x = False
return x
def tester(text, expected_result):
actual_result = checkisPallendrome(text)
print_text = "The text was: " + str(text) + " | "
if expected_result == actual_result:
print_text += "Correct: " + str(expected_result) + " == " + str(actual_result)
else:
print_text += "Incorrect: " + str(expected_result) + " != " + str(actual_result)
return print_text
if __name__ == "__main__":
# Try it on these to see if it works?
text0 = "My foot is a hamburger"
text1 = "Go hang a salami Im a lasagna hog"
text2 = "She sells sea shells by the sea shore"
text3 = "race car"
text4 = "My school"
# Test it like this
print(tester(text0, False))
print(tester(text1, True))
print(tester(text2, False))
print(tester(text3, True))
print(tester(text4, False))
``` |
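The helpers above boil down to "normalize, then compare with the reverse". An equivalent compact version, shown only as a cross-check of the same logic:
```python
def is_palindrome(text: str) -> bool:
    normalized = text.lower().replace(" ", "")
    return normalized == normalized[::-1]

assert is_palindrome("Go hang a salami Im a lasagna hog")
assert not is_palindrome("My school")
```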
{
"source": "1i-medialib/media-lib-tools",
"score": 3
} |
#### File: media-lib-tools/music/artist.py
```python
from utilities.logging import Logging
import psycopg2
import psycopg2.extras
class Artist:
"Music Artist class"
def __init__(self, log, ytmusic, dbh, name=None, rating=0):
self.l = log # Logging Object
self.ytm = ytmusic # youtube Music API object
self.dbh = dbh # database handle
self.id = None # pk from database
self.name = name
self.rating = rating
self.yt_id = None
def print_attributes(self):
self.l.debug('Artist:')
self.l.debug(' ID : {}'.format(self.id))
self.l.debug(' Name : {}'.format(self.name))
self.l.debug(' Rating: {}'.format(self.rating))
def load_artist_from_youtube(self,youtube_artist):
if 'id' in youtube_artist:
self.yt_id = youtube_artist['id']
if 'name' in youtube_artist:
self.name = youtube_artist['name']
self.query_artist()
self.save()
self.print_attributes()
def query_artist_by_id(self):
# query artist from db
if not self.id:
self.l.log('No id is defined to query artist by')
return
c_query = self.dbh.cursor(cursor_factory=psycopg2.extras.DictCursor)
query_statement = """
SELECT *
FROM medialib.artist s
WHERE s.id = %s
"""
c_query.execute(query_statement, (self.id,))
if c_query.rowcount == 0:
self.l.log('No artist found for id: {}'.format(self.id))
return
sdata = c_query.fetchone()
self.name = sdata['name']
self.rating = sdata['rating']
self.yt_id = sdata['youtube_id']
def query_artist(self):
# query artist from db
if self.id:
# we have an id. Query by it
self.query_artist_by_id()
return
# query by title, artist, album
self.l.log('Querying artist with name: {}, type: {}'.format(
self.name, type(self.name)))
c_query = self.dbh.cursor(cursor_factory=psycopg2.extras.DictCursor)
query_statement = """
SELECT *
FROM medialib.artist s
WHERE s.name = %s
"""
c_query.execute(query_statement,
(self.name,))
if c_query.rowcount == 0:
self.l.log('No artist found for name: {}, type: {}'.format(self.name, type(self.name)))
return
if c_query.rowcount != 1:
self.l.log('Found multiple artists!')
return
sdata = c_query.fetchone()
self.id = sdata['id']
self.name = sdata['name']
self.yt_id = sdata['youtube_id']
def update_db(self):
pass
def insert_db(self):
try:
c_stmt = self.dbh.cursor()
insert_stmt = """
insert into medialib.artist
( name, rating, youtube_id )
values
( %s, %s, %s )
RETURNING id
"""
c_stmt.execute(
insert_stmt,
(self.name, self.rating, self.yt_id )
)
self.id = c_stmt.fetchone()[0]
self.l.debug('Inserted artist: {} as id: {}'.format(
self.name, self.id))
except (Exception, psycopg2.Error) as error:
self.l.log('Error inserting artist: {}'.format(error))
self.print_attributes()
raise
finally:
c_stmt.close()
def save(self):
# save artist to database
# query first to see if it exists:
self.query_artist()
if self.id:
self.update_db()
else:
self.insert_db()
self.dbh.commit()
```
#### File: media-lib-tools/music/playlist.py
```python
from utilities.logging import Logging
from music.song import Song
class Playlist:
"Music Playlist class"
def __init__(self, utilities, ytmusic, dbh, info=None):
self.u = utilities # Utilities Object
self.ytm = ytmusic # youtube Music API object
self.dbh = dbh # database handle
self.id = None # pk from database
self.name = None
self.rating = 0
self.description = None
self.track_count = 0
self.songs = []
self.yt_id = None
if info:
if 'name' in info:
self.name = info['name']
if 'description' in info:
self.description = info['description']
if 'trackCount' in info:
self.track_count = int(info['trackCount'])
if 'songs' in info:
self.songs = info['songs']
if 'rating' in info:
self.rating = int(info['rating'])
def print_attributes(self):
self.u.log('Playlist:')
self.u.log(' Name : {}'.format(self.name))
self.u.log(' Rating : {}'.format(self.rating))
self.u.log(' Track Count: {}'.format(self.track_count))
def get_songs_from_youtube_playlist_id(self):
if not self.yt_id:
self.u.log('No yt id set')
return
pl = self.ytm.get_playlist(self.yt_id, limit=(self.track_count + 1))
for track in pl['tracks']:
s = Song(self.u,self.ytm,self.dbh)
s.load_song_from_youtube(track)
self.songs.append(s)
def load_playlist_from_youtube(self, youtube_playlist):
# self.u.pprintd(youtube_playlist)
if 'id' in youtube_playlist:
self.yt_id = youtube_playlist['id']
elif 'playlistId' in youtube_playlist:
self.yt_id = youtube_playlist['playlistId']
if 'title' in youtube_playlist:
self.name = youtube_playlist['title']
if 'description' in youtube_playlist:
self.description = youtube_playlist['description']
if 'trackCount' in youtube_playlist:
self.track_count = int(youtube_playlist['trackCount'])
elif 'count' in youtube_playlist:
self.track_count = int(youtube_playlist['count'])
if 'tracks' in youtube_playlist:
self.songs = youtube_playlist['tracks']
else:
# get songs from pl
self.get_songs_from_youtube_playlist_id()
self.print_attributes()
def save(self):
# save items in the playlist to db
self.u.debug('Saving playlist: {}'.format(self.name))
# iterate songs and save
for song in self.songs:
self.u.debug('Saving song: {}'.format(song.title))
song.save()
``` |
{
"source": "1inch/cumulative-merkle-drop",
"score": 3
} |
#### File: cumulative-merkle-drop/src/parse.py
```python
import json
from eth_utils import to_checksum_address
monthly_files = [
'data/21_08.json',
'data/21_09.json',
'data/21_10.json',
'data/21_11.json',
'data/21_12.json',
'data/bf.json'
]
data_in = []
for filename in monthly_files:
with open(filename) as in_f:
data_in.extend(json.load(in_f)['data']['get_result_by_result_id'])
dataset = []
for e in data_in:
dataset.append({
# 'tx_hash': e['data']['tx_hash'].replace('\\', '0'),
'tx_from': e['data']['tx_from'].replace('\\', '0'),
# 'eth_price': e['data']['eth_price'],
# 'inch_price': e['data']['inch_price'],
# 'inch_price': e['data']['inch_price'],
'inch_refund': e['data']['inch_refund'],
# 'eth_used': e['data']['eth_used'],
})
# with open('processed.json', 'w') as out_f:
# json.dump(dataset, out_f)
# with open('processed.json') as in_f:
# dataset = json.load(in_f)
# slippage_logs = {}
# with open('trades.csv') as in_f:
# for line in in_f.readlines()[1:]:
# _, tx_hash, _, _, slippage = line[:-1].split(',')
# slippage_logs[tx_hash[1:-1]] = float(slippage[1:-1])
# filtered_dataset = []
# def txns(addr):
# total_txns = 0
# total_refund = 0
# low_slippage_txns = 0
# low_slippage_refund = 0
# untracked_txns = 0
# untracked_refund = 0
# last_untracked_txn = ''
# for e in dataset:
# if addr is not None and addr != e['tx_from']:
# continue
# log = slippage_logs.get(e['tx_hash'])
# total_refund += e['inch_refund']
# total_txns += 1
# if log is not None:
# if log < 1.0:
# low_slippage_txns += 1
# low_slippage_refund += e['inch_refund']
# else:
# if addr is None:
# filtered_dataset.append(e)
# else:
# print(e['tx_hash'])
# else:
# untracked_txns += 1
# untracked_refund += e['inch_refund']
# last_untracked_txn = e['tx_hash']
# if addr is None:
# filtered_dataset.append(e)
# print(last_untracked_txn)
# return total_refund, total_txns, low_slippage_refund, low_slippage_txns, untracked_refund, untracked_txns
# def calc_stats(addr=None):
# total_refund, total_txns, low_slippage_refund, low_slippage_txns, untracked_refund, untracked_txns = txns(addr)
# if addr is None:
# print('Total stats:')
# else:
# print('{} stats:'.format(addr))
# print('\ttotal txns: {}, total refund: {:.0f} 1INCH'.format(total_txns, total_refund))
# print('\tlow slippage txns: {}, low slippage refund: {:.0f} 1INCH'.format(low_slippage_txns, low_slippage_refund))
# print('\tuntracked txns: {}, untracked refund: {:.0f} 1INCH'.format(untracked_txns, untracked_refund))
# [calc_stats(addr) for addr in [
# None,
# ]]
drop_data = {}
for e in dataset:
addr = to_checksum_address(e['tx_from'])
if addr in drop_data:
drop_data[addr] += int(e['inch_refund'] * 1e18)
else:
drop_data[addr] = int(e['inch_refund'] * 1e18)
# for k in sorted(drop_data.values(), reverse=True)[:5]:
# print(k)
# print(len(drop_data))
for k in drop_data.keys():
drop_data[k] = str(drop_data[k])
with open('drop_data.json', 'w') as out_f:
json.dump(drop_data, out_f)
``` |
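The final loop converts each refund to an 18-decimal base-unit integer and serializes it as a string, so the JSON consumed by the merkle-drop tooling never loses precision on large integers. A small sketch of that conversion with made-up addresses and amounts:
```python
import json

refunds = {"0xAbC...": 12.5, "0xDeF...": 0.5}   # token amounts, illustrative only
drop = {addr: str(int(amount * 1e18)) for addr, amount in refunds.items()}
print(json.dumps(drop))
# {"0xAbC...": "12500000000000000000", "0xDeF...": "500000000000000000"}
```
For amounts that are not exactly representable as floats, `decimal.Decimal` would be the safer route; the script above inherits whatever precision the upstream JSON exports provide.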
{
"source": "1iyiwei/topopt",
"score": 3
} |
#### File: topopt/src/fesolvers.py
```python
import numpy as np
# https://docs.scipy.org/doc/scipy-0.18.1/reference/sparse.html
from scipy.sparse import coo_matrix, lil_matrix, csc_matrix, csr_matrix
from scipy.sparse.linalg import spsolve
class FESolver(object):
def __init__(self, verbose = False):
self.verbose = verbose
# finite element computation for displacement
def displace(self, load, x, ke, penal):
f = load.force()
fixdofs = np.array(load.fixdofs())
freedofs = np.array(load.freedofs())
nely, nelx = x.shape
k_freedofs = self.gk_freedofs(load, x, ke, penal)
        u = np.zeros(load.dim*(nely+1)*(nelx+1))
u[freedofs] = spsolve(k_freedofs, f[freedofs])
u[fixdofs] = 0.0
return u
# global stiffness matrix
def gk_freedofs(self, load, x, ke, penal):
raise NotImplementedError
# Using lil_matrix is quite slow
class LilFESolver(FESolver):
def __init__(self, verbose = False):
super().__init__(verbose)
def gk_freedofs(self, load, x, ke, penal):
nelx, nely = load.shape()
dof = load.dim*(nelx+1)*(nely+1)
k = lil_matrix((dof, dof))
for elx in range(nelx):
for ely in range(nely):
sel = load.edof(elx, ely, nelx, nely)
                k[np.ix_(sel, sel)] += ke*(x[ely, elx]**penal)
freedofs = np.array(load.freedofs())
k_freedofs = k[np.ix_(freedofs, freedofs)].tocsc()
return k_freedofs
# coo_matrix should be faster
class CooFESolver(FESolver):
def __init__(self, verbose = False):
super().__init__(verbose)
def gk_freedofs(self, load, x, ke, penal):
nelx, nely = load.shape()
edof, x_list, y_list = load.edof(nelx, nely)
kd = x.T.reshape(nelx*nely, 1, 1) ** penal
value_list = (np.tile(ke, (nelx*nely, 1, 1))*kd).flatten()
# coo_matrix automatically sums duplicated entries, so it is handy
dof = load.dim*(nelx+1)*(nely+1)
k = coo_matrix((value_list, (y_list, x_list)), shape=(dof, dof)).tocsc()
freedofs = load.freedofs()
k_freedofs = k[freedofs,:][:,freedofs]
return k_freedofs
``` |
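The comments above note that `coo_matrix` is preferred because assembling the global stiffness matrix needs duplicate (row, col) contributions to be summed, which SciPy's COO-to-CSC conversion does automatically. A short demonstration of that property:
```python
import numpy as np
from scipy.sparse import coo_matrix

rows = np.array([0, 0, 1])
cols = np.array([0, 0, 1])
vals = np.array([1.0, 2.0, 5.0])   # two separate contributions to entry (0, 0)

k = coo_matrix((vals, (rows, cols)), shape=(2, 2)).tocsc()
print(k.toarray())
# [[3. 0.]
#  [0. 5.]]
```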
{
"source": "1iyiwei/video_ffmpeg",
"score": 3
} |
#### File: 1iyiwei/video_ffmpeg/video.py
```python
import sys
import os
import shutil
import subprocess
import re
import glob
import shlex
video_codec = 'libx264'
audio_codec = 'libfdk_aac'
preset = 'medium' # medium, veryslow
def kbsfromline(line):
    m = re.search(r'(,\s*)(\d+)(\s*kb/s)', line)
if (m!= None):
answer = m.group(2)
else:
answer = ''
return answer
def bitrate(input_video_file_name):
video_bitrate = ''
audio_bitrate = ''
query_command = "ffmpeg -i " + input_video_file_name
process = subprocess.Popen(shlex.split(query_command), stderr=subprocess.PIPE)
text = process.stderr.read()
retcode = process.wait()
lines = text.split("\n")
for line in lines:
m = re.search('Stream.*Video', line)
if(m != None):
video_bitrate = kbsfromline(line)
m = re.search('Stream.*Audio', line)
if(m != None):
audio_bitrate = kbsfromline(line)
return [video_bitrate, audio_bitrate]
# convert video from one format to another
def ffmpeg_convert(input_video_file_name, output_video_file_name, two_pass=True):
#qp is the quantization parameter for the fixed-quality setting (0 = lossless). I cannot be sure if x264 is acceptable to the system yet, but it can be opened by Quicktime so I guess it is fine.
#command = "ffmpeg -i " + input_video_file_name + " -vcodec libx264 -preset veryslow -qp 3 -pix_fmt yuv420p -loglevel warning " + output_video_file_name
[video_bitrate, audio_bitrate] = bitrate(input_video_file_name)
if video_bitrate == "":
two_pass = False
if two_pass:
# two pass to control output size
# https://trac.ffmpeg.org/wiki/Encode/H.264
spec = " -c:v " + video_codec + " -preset " + preset + " -b:v " + video_bitrate + 'k'
command_pass1 = "ffmpeg -loglevel warning -y -i " + input_video_file_name + spec + " -pass 1 " + " -f mp4 NUL"
command_pass2 = "ffmpeg -loglevel warning -i " + input_video_file_name + spec + " -pass 2 " + output_video_file_name
command = command_pass1 + " && " + command_pass2
else:
input_video_file_size = os.path.getsize(input_video_file_name)
spec = " -fs " + str(input_video_file_size) + " -preset " + preset + " "
command = "ffmpeg -loglevel warning -i " + input_video_file_name + spec + output_video_file_name
os.system(command)
if two_pass:
remove_file_list = glob.glob("ffmpeg*pass*log*")
for remove_file in remove_file_list:
os.remove(remove_file)
``` |
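`bitrate` shells out to `ffmpeg -i`, then `kbsfromline` pulls the `NNN kb/s` figure out of each stream line with a regular expression. A sketch of that parsing step against a hand-written sample line (not real captured ffmpeg output):
```python
import re

line = "    Stream #0:0: Video: h264 (High), yuv420p, 1920x1080, 4382 kb/s, 30 fps"
m = re.search(r'(,\s*)(\d+)(\s*kb/s)', line)
print(m.group(2) if m else '')   # 4382
```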
{
"source": "1izard/mvs",
"score": 3
} |
#### File: mvs/tests/test_renames.py
```python
import pytest
import os
import pathlib
import shutil
from renames import renames
HERE = os.path.dirname(os.path.abspath(__file__))
TEST_PRODUCTS_PATH = os.path.join(HERE, 'test_products')
TESTD0_PATH = os.path.join(HERE, 'testd0')
TESTD1_PATH = os.path.join(HERE, 'testd1')
TESTD2_PATH = os.path.join(HERE, 'testd2')
def set_test_files():
if os.path.isdir(TESTD2_PATH):
shutil.rmtree(TESTD2_PATH)
os.makedirs(TESTD2_PATH)
for i in range(3):
inner_dir_path = os.path.join(TESTD2_PATH, f'tmpd2_{i}')
os.makedirs(inner_dir_path)
inner_file_path = os.path.join(TESTD2_PATH, f'tmp2_{i}.txt')
with open(inner_file_path, 'w') as f:
f.write(str(os.path.basename(inner_file_path)))
for j in range(3):
pathlib.Path(os.path.join(inner_dir_path, f'tmp2_{i}_{j}.txt')).touch()
def test_glob_file_names():
expected = ['tmpd0', 'tmpd1', 'tmpd2', 'tmp0.txt', 'tmp1.txt', 'tmp2.txt']
expected.sort()
actual = renames.glob_file_names(TESTD0_PATH)
actual.sort()
assert actual == expected
def test_write_file_names():
file_names = ['tmpd0', 'tmpd1', 'tmpd2', 'tmp0.txt', 'tmp1.txt', 'tmp2.txt']
expected = ['tmpd0 >> tmpd0', 'tmpd1 >> tmpd1', 'tmpd2 >> tmpd2',
'tmp0.txt >> tmp0.txt', 'tmp1.txt >> tmp1.txt', 'tmp2.txt >> tmp2.txt']
expected.sort()
dst_path = os.path.join(TEST_PRODUCTS_PATH, 'test_write_file_names_result.txt')
renames.write_file_names(file_names, dst_path)
with open(dst_path, 'r') as f:
lines = [l.strip('\n') for l in f]
actual = sorted(lines)
assert actual == expected
def test_read_file_lines():
expected = [
'src1 >> dst1',
'src2 >> dst2',
'src3 >> dst3',
]
actual = renames.read_file_list_lines(os.path.join(TESTD1_PATH, 'filelist.txt'))
actual
assert actual == expected
def test_build_file_name_pair_valid():
expected = ('src1', 'dst1')
arg = 'src1 >> dst1'
actual = renames.build_file_name_pair(arg)
assert actual == expected
def test_build_file_name_pair_with_invalid_char_slash():
args = ['/src1 >> dst1', 'src1 >> /dst1']
for i, arg in enumerate(args, 1):
with pytest.raises(ValueError) as excinfo:
renames.build_file_name_pair(arg)
assert 'line {}: Invalid character' in str(excinfo.value)
def test_build_file_name_pair_with_invalid_char_single_quote():
arg = "'/src1' >> dst1"
with pytest.raises(ValueError) as excinfo:
renames.build_file_name_pair(arg)
assert 'line {}: Invalid character' in str(excinfo.value)
def test_build_file_name_pair_with_invalid_char_double_quote():
arg = '"/src1" >> dst1'
with pytest.raises(ValueError) as excinfo:
renames.build_file_name_pair(arg)
assert 'line {}: Invalid character' in str(excinfo.value)
def test_build_file_name_pair_invalid_syntax():
args = ['src1 > dst1', 'src 1 >> dst1']
for arg in args:
with pytest.raises(ValueError) as excinfo:
renames.build_file_name_pair(arg)
assert 'line {}: Invalid syntax' in str(excinfo.value)
def test_has_duplicate_value_true():
expected = True
arg = {
'src1': 'dst1',
'src2': 'dst1',
}
actual = renames.has_duplicate_value(arg)
assert actual == expected
def test_has_duplicate_false():
expected = False
arg = {
'src1': 'dst1',
'src2': 'dst2',
}
actual = renames.has_duplicate_value(arg)
assert actual == expected
def test_build_file_name_map_valid():
expected = {
'src1': 'dst1',
'src2': 'dst2'
}
arg = ['src1 >> dst1', 'src2 >> dst2']
actual = renames.build_file_name_map(arg)
assert actual == expected
def test_build_file_name_map_with_invalid_syntax():
with pytest.raises(ValueError) as excinfo:
arg = ['src1 > dst1', 'src2 > dst2']
renames.build_file_name_map(arg)
assert 'line 1: Invalid syntax;' in str(excinfo.value)
def test_build_file_name_map_with_duplicate_src():
with pytest.raises(ValueError) as excinfo:
arg = ['src1 >> dst1', 'src1 >> dst2']
renames.build_file_name_map(arg)
assert 'Found a duplicate src file name' in str(excinfo.value)
def test_build_file_name_map_with_duplicate_dst():
with pytest.raises(ValueError) as excinfo:
arg = ['src1 >> dst1', 'src2 >> dst1']
renames.build_file_name_map(arg)
assert 'Found a duplicate dst file name' in str(excinfo.value)
def test_rename_files_normal():
set_test_files()
file_name_map = {
'tmpd2_0': 'afterd2_0',
'tmpd2_1': 'afterd2_1',
'tmpd2_2': 'afterd2_2',
'tmp2_0.txt': 'after2_0',
'tmp2_1.txt': 'after2_1',
'tmp2_2.txt': 'after2_2',
}
expected = set(file_name_map.values())
renames.rename_files(file_name_map, TESTD2_PATH)
with os.scandir(TESTD2_PATH) as it:
actual = set([e.name for e in it])
assert actual == expected
def test_rename_files_node():
set_test_files()
file_name_map = {
'tmpd2_0': 'tmpd2_1',
'tmpd2_1': 'tmpd2_2',
'tmpd2_2': 'tmpd2_3',
'tmp2_0.txt': 'tmp2_1.txt',
'tmp2_1.txt': 'tmp2_2.txt',
'tmp2_2.txt': 'tmp2_3.txt',
}
expected = set(file_name_map.values())
renames.rename_files(file_name_map, TESTD2_PATH)
with os.scandir(TESTD2_PATH) as it:
actual = set([e.name for e in it])
assert actual == expected
def test_rename_files_cycle():
set_test_files()
file_name_map = {
'tmpd2_0': 'tmpd2_1',
'tmpd2_1': 'tmpd2_2',
'tmpd2_2': 'tmpd2_0',
'tmp2_0.txt': 'tmp2_1.txt',
'tmp2_1.txt': 'tmp2_2.txt',
'tmp2_2.txt': 'tmp2_0.txt',
}
expected = set(file_name_map.values())
renames.rename_files(file_name_map, TESTD2_PATH)
with os.scandir(TESTD2_PATH) as it:
actual = set([e.name for e in it])
assert actual == expected
def test_rename_files_cycle_pair():
set_test_files()
file_name_map = {
'tmpd2_0': 'tmpd2_1',
'tmpd2_1': 'tmpd2_0',
'tmpd2_2': 'tmpd2_2',
'tmp2_0.txt': 'tmp2_1.txt',
'tmp2_1.txt': 'tmp2_0.txt',
'tmp2_2.txt': 'tmp2_2.txt',
}
expected = set(file_name_map.values())
renames.rename_files(file_name_map, TESTD2_PATH)
with os.scandir(TESTD2_PATH) as it:
actual = set([e.name for e in it])
assert actual == expected
def test_rename_files_including_same():
set_test_files()
file_name_map = {
'tmpd2_0': 'tmpd2_0',
'tmpd2_1': 'tmpd2_1',
'tmpd2_2': 'tmpd2_3',
'tmp2_0.txt': 'tmp2_0.txt',
'tmp2_1.txt': 'tmp2_1.txt',
'tmp2_2.txt': 'tmp2_3.txt',
}
expected = set(file_name_map.values())
renames.rename_files(file_name_map, TESTD2_PATH)
with os.scandir(TESTD2_PATH) as it:
actual = set([e.name for e in it])
assert actual == expected
``` |
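The cycle tests above (for example `tmpd2_0 -> tmpd2_1 -> tmpd2_2 -> tmpd2_0`) require a rename strategy that never clobbers a destination that is still some other pair's source. The `renames` module itself is not shown in this file, so the following is only one possible approach, sketched as a two-phase rename through temporary names:
```python
import os
import uuid

def rename_files_two_phase(name_map, dir_path):
    """Rename src -> dst for every pair in name_map, tolerating cycles."""
    # Phase 1: move every changing source to a unique temporary name.
    tmp_map = {}
    for src, dst in name_map.items():
        if src == dst:
            continue
        tmp = f"{src}.{uuid.uuid4().hex}.tmp"
        os.rename(os.path.join(dir_path, src), os.path.join(dir_path, tmp))
        tmp_map[tmp] = dst
    # Phase 2: move the temporaries to their final names.
    for tmp, dst in tmp_map.items():
        os.rename(os.path.join(dir_path, tmp), os.path.join(dir_path, dst))
```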
{
"source": "1izard/poetryenv",
"score": 2
} |
#### File: poetryenv/poetryenv/environments.py
```python
import os
def poetry_installed():
paths = os.environ.get('PATH', '')
poetry_path = os.path.join('.poetry', 'bin')
return poetry_path in paths
PYENV_INSTALLED = bool(os.environ.get('PYENV_SHELL') or os.environ.get('PYENV_ROOT'))
POETRY_INSTALLED = poetry_installed()
``` |
{
"source": "1jinwoo/YHack2018",
"score": 3
} |
#### File: 1jinwoo/YHack2018/item_title_cleaner.py
```python
import data_cleaner as dc
import re
# In[42]:
def clean_item_data():
#read in file using data_cleaner
df = dc.read_search_strings()
'''
    Remove '.' and other non-alphanumeric characters from the end of each word
    (e.g. 'oz.') while keeping ", ', space, '.' and '$' inside words so tokens
    like '7.5mm' survive. Æ stays for now and might be removed later.
'''
for index, row in df.iterrows():
new_string = ''
for item in row['item_title']:
item = ''.join(c for c in item if c.isalnum() or c == '\"' or c == '\'' or c == ' ' or c == '.' or c == '$')
new_string += item
word_list = new_string.split()
new_word = ''
for w in word_list:
if w.endswith('.'):
new_word += w[:-1] + ' '
else:
new_word += w + ' '
new_string = new_word
df.at[index, 'item_title']= new_string
return df
# In[43]:
'''
Returns item_title at specified index (from 0 to 11121)
'''
def item_title(index):
return df.loc[index]['item_title']
# In[44]:
if __name__ == '__main__':
df = clean_item_data()
for n in range(11121):
print(n, item_title(n))
``` |
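Applied to a single made-up title, the cleaning rule in `clean_item_data` behaves like this (dataframe handling omitted):
```python
title = 'Sterling Silver Chain, 7.5mm links, 2 oz. ~ $15'
kept = ''.join(c for c in title if c.isalnum() or c in ' \'".$')
words = [w[:-1] if w.endswith('.') else w for w in kept.split()]
print(' '.join(words))
# Sterling Silver Chain 7.5mm links 2 oz $15
```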
{
"source": "1joker4ever/libapps-mirror",
"score": 2
} |
#### File: libdot/bin/libdot.py
```python
from __future__ import print_function
import argparse
import base64
import importlib.machinery
import io
import logging
import logging.handlers
import os
from pathlib import Path
import subprocess
import sys
import time
import types
from typing import Dict, List
import urllib.request
# Require recent Python 3 versions as a sanity check.
# NB: We cannot require newer versions than CrOS itself supports.
assert (sys.version_info.major, sys.version_info.minor) >= (3, 6), (
'Python 3.6 or newer is required; found %s' % (sys.version,))
BIN_DIR = Path(__file__).resolve().parent
DIR = BIN_DIR.parent
LIBAPPS_DIR = DIR.parent
class ColoredFormatter(logging.Formatter):
"""Colorize warning/error messages automatically."""
_COLOR_MAPPING = {
'WARNING': '\033[1;33m',
'ERROR': '\033[1;31m'
}
_RESET = '\033[m'
def __init__(self, *args, **kwargs):
"""Initialize!"""
self._use_colors = 'NOCOLOR' not in os.environ
super(ColoredFormatter, self).__init__(*args, **kwargs)
def format(self, record):
"""Formats |record| with color."""
msg = super(ColoredFormatter, self).format(record)
color = self._COLOR_MAPPING.get(record.levelname)
if self._use_colors and color:
msg = '%s%s%s' % (color, msg, self._RESET)
return msg
def setup_logging(debug=False, quiet=0):
"""Setup the logging module."""
fmt = '%(asctime)s: %(levelname)-7s: '
if debug:
fmt += '%(filename)s:%(funcName)s: '
fmt += '%(message)s'
# 'Sat, 05 Oct 2013 18:58:50 -0400 (EST)'
datefmt = '%a, %d %b %Y %H:%M:%S %z'
tzname = time.strftime('%Z', time.localtime())
if tzname and ' ' not in tzname and len(tzname) <= 5:
# If the name is verbose, don't include it. Some systems like to use
# "Eastern Daylight Time" which is much too chatty.
datefmt += f' ({tzname})'
if debug:
level = logging.DEBUG
elif quiet <= 0:
level = logging.INFO
elif quiet <= 1:
level = logging.WARNING
elif quiet <= 2:
level = logging.ERROR
elif quiet <= 3:
level = logging.CRITICAL
formatter = ColoredFormatter(fmt, datefmt)
handler = logging.StreamHandler(stream=sys.stdout)
handler.setFormatter(formatter)
logger = logging.getLogger()
logger.addHandler(handler)
logger.setLevel(level)
class ArgumentParser(argparse.ArgumentParser):
"""Custom parser to hold a consistent set of options & runtime env."""
def __init__(self, short_options=True, **kwargs):
"""Initialize!"""
super(ArgumentParser, self).__init__(**kwargs)
self.add_common_arguments(short_options=short_options)
def parse_args(self, args=None, namespace=None):
"""Parse all the |args| and save the results to |namespace|."""
# This will call our parse_known_args below, so don't use setup_logging.
namespace = argparse.ArgumentParser.parse_args(
self, args=args, namespace=namespace)
return namespace
def parse_known_args(self, args=None, namespace=None):
"""Parse all the |args| and save the results to |namespace|."""
namespace, unknown_args = argparse.ArgumentParser.parse_known_args(
self, args=args, namespace=namespace)
setup_logging(debug=namespace.debug, quiet=namespace.quiet)
return (namespace, unknown_args)
def add_common_arguments(self, short_options=True):
"""Add our custom/consistent set of command line flags."""
getopts = lambda *args: args if short_options else args[1:]
self.add_argument(*getopts('-d', '--debug'), action='store_true',
help='Run with debug output.')
self.add_argument(*getopts('-q', '--quiet'), action='count', default=0,
help='Use once to hide info messages, twice to hide '
'warnings, and thrice to hide errors.')
def touch(path):
"""Touch (and truncate) |path|."""
open(path, 'wb').close()
def unlink(path):
"""Remove |path| and ignore errors if it doesn't exist."""
try:
os.unlink(path)
except FileNotFoundError:
pass
def symlink(target, path):
"""Always symlink |path| to a relativized |target|."""
unlink(path)
path = os.path.realpath(path)
target = os.path.relpath(os.path.realpath(target), os.path.dirname(path))
logging.info('Symlinking %s -> %s', path, target)
os.symlink(target, path)
def cmdstr(cmd):
"""Return a string for the |cmd| list w/reasonable quoting."""
if isinstance(cmd, str):
return cmd
quoted = []
for arg in cmd:
if isinstance(arg, Path):
arg = str(arg)
if ' ' in arg:
arg = '"%s"' % (arg,)
quoted.append(arg)
return ' '.join(quoted)
def run(cmd: List[str],
cmd_prefix: List[str] = None,
log_prefix: List[str] = None,
check: bool = True,
cwd: str = None,
extra_env: Dict[str, str] = None,
**kwargs):
"""Run |cmd| inside of |cwd| and exit if it fails.
Args:
cmd: The command to run.
cmd_prefix: (Unlogged) prefix for the command to run. Useful for passing
interpreters like `java` or `python` but omitting from default output.
log_prefix: Prefix for logging the command, but not running. Useful for
wrapper scripts that get executed directly and use |cmd_prefix|.
check: Whether to exit if |cmd| execution fails.
cwd: The working directory to run |cmd| inside of.
extra_env: Extra environment settings to set before running.
Returns:
A subprocess.CompletedProcess instance.
"""
# Python 3.6 doesn't support capture_output.
if sys.version_info < (3, 7):
capture_output = kwargs.pop('capture_output', None)
if capture_output:
assert 'stdout' not in kwargs and 'stderr' not in kwargs
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.PIPE
# The |env| setting specifies the entire environment, so we need to manually
# merge our |extra_env| settings into it before passing it along.
if extra_env is not None:
env = kwargs.pop('env', os.environ)
env = env.copy()
env.update(extra_env)
kwargs['env'] = env
if not log_prefix:
log_prefix = []
log_cmd = log_prefix + cmd
if not cmd_prefix:
cmd_prefix = []
real_cmd = cmd_prefix + cmd
if cwd is None:
cwd = os.getcwd()
logging.info('Running: %s\n (cwd = %s)', cmdstr(log_cmd), cwd)
if cmd_prefix:
logging.debug('Real full command: %s', cmdstr(real_cmd))
result = subprocess.run(real_cmd, cwd=cwd, check=False, **kwargs)
if check and result.returncode:
logging.error('Running %s failed!', log_cmd[0])
if result.stdout is not None:
logging.error('stdout:\n%s', result.stdout)
if result.stderr is not None:
logging.error('stderr:\n%s', result.stderr)
sys.exit(result.returncode)
return result
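# A hypothetical invocation (the script name and env var below are made up for
# illustration): run a helper through an interpreter prefix that is executed but kept
# out of the default log line, while layering an extra variable on top of os.environ.
#   run(['helper.py', '--verbose'], cmd_prefix=[sys.executable],
#       extra_env={'FOO': 'bar'}, capture_output=True)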
def unpack(archive, cwd=None, files=()):
"""Unpack |archive| into |cwd|."""
if cwd is None:
cwd = os.getcwd()
if files:
files = ['--'] + list(files)
else:
files = []
# Try to make symlink usage easier in Windows.
extra_env = {
'MSYS': 'winsymlinks:nativestrict',
}
logging.info('Unpacking %s', os.path.basename(archive))
# We use relpath here to help out tar on platforms where it doesn't like
# paths with colons in them (e.g. Windows). We have to construct the full
# before running through relpath as relative archives will implicitly be
# checked against os.getcwd rather than the explicit cwd.
src = os.path.relpath(os.path.join(cwd, archive), cwd)
run(['tar', '-xf', src] + files, cwd=cwd, extra_env=extra_env)
def fetch_data(uri: str, output=None, verbose: bool = False, b64: bool = False):
"""Fetch |uri| and write the results to |output| (or return BytesIO)."""
# This is the timeout used on each blocking operation, not the entire
# life of the connection. So it's used for initial urlopen and for each
# read attempt (which may be partial reads). 5 minutes should be fine.
TIMEOUT = 5 * 60
if output is None:
output = io.BytesIO()
with urllib.request.urlopen(uri, timeout=TIMEOUT) as infp:
mb = 0
length = infp.length
while True:
data = infp.read(1024 * 1024)
if not data:
break
# Show a simple progress bar if the user is interactive.
if verbose:
mb += 1
print('~%i MiB downloaded' % (mb,), end='')
if length:
percent = mb * 1024 * 1024 * 100 / length
print(' (%.2f%%)' % (percent,), end='')
print('\r', end='', flush=True)
if b64:
data = base64.b64decode(data)
output.write(data)
return output
def fetch(uri, output, b64=False):
"""Download |uri| and save it to |output|."""
output = os.path.abspath(output)
distdir, name = os.path.split(output)
if os.path.exists(output):
logging.info('Using existing download: %s', name)
return
logging.info('Downloading %s to %s', uri, output)
os.makedirs(distdir, exist_ok=True)
# Use kokoro build cache or Gentoo distdir if available.
for envvar in ('KOKORO_GFILE_DIR', 'DISTDIR'):
cache_dir = os.getenv(envvar)
if cache_dir:
cache_file = os.path.join(cache_dir, name)
if os.path.exists(cache_file):
logging.info(' Cache hit via %s', envvar)
symlink(cache_file, output)
return
# Don't be verbose if running on CI systems.
verbose = os.isatty(sys.stdout.fileno())
# We use urllib rather than wget or curl to avoid external utils & libs.
# This seems to be good enough for our needs.
tmpfile = output + '.tmp'
for _ in range(0, 5):
try:
with open(tmpfile, 'wb') as outfp:
fetch_data(uri, outfp, verbose=verbose, b64=b64)
break
except ConnectionError as e:
time.sleep(1)
logging.warning('Download failed; retrying: %s', e)
else:
        logging.error('Unable to download; giving up')
unlink(tmpfile)
sys.exit(1)
# Clear the progress bar.
if verbose:
print(' ' * 80, end='\r')
os.rename(tmpfile, output)
def node_and_npm_setup():
"""Download our copies of node & npm to our tree and updates env ($PATH)."""
# We have to update modules first as it'll nuke the dir node lives under.
node.modules_update()
node.update()
def load_module(name, path):
"""Load a module from the filesystem.
Args:
name: The name of the new module to import.
path: The full path to the file to import.
"""
loader = importlib.machinery.SourceFileLoader(name, path)
module = types.ModuleType(loader.name)
loader.exec_module(module)
return module
class HelperProgram:
"""Wrapper around local programs that get reused by other projects.
    This allows people to do in-process execution rather than having to fork+exec
another Python instance.
This allows us to avoid filesystem symlinks (which aren't portable), and to
avoid naming programs with .py extensions, and to avoid clashes between
projects that use the same program name (e.g. "import lint" would confuse
libdot/bin/lint & nassh/bin/lint), and to avoid merging all libdot helpers
into the single libdot.py module.
"""
_BIN_DIR = BIN_DIR
def __init__(self, name, path=None):
"""Initialize.
Args:
name: The base name of the program to import.
path: The full path to the file. It defaults to libdot/bin/|name|.
"""
self._name = name
if path is None:
path = os.path.join(self._BIN_DIR, name)
self._path = path
self._module_cache = None
@property
def _module(self):
"""Load & cache the program module."""
if self._module_cache is None:
self._module_cache = load_module(self._name, self._path)
return self._module_cache
def __getattr__(self, name):
"""Dynamic forwarder to module members."""
return getattr(self._module, name)
# Wrappers around libdot/bin/ programs for other tools to access directly.
closure_compiler = HelperProgram('closure-compiler')
concat = HelperProgram('concat')
cpplint = HelperProgram('cpplint')
eslint = HelperProgram('eslint')
headless_chrome = HelperProgram('headless-chrome')
lint = HelperProgram('lint')
load_tests = HelperProgram('load_tests')
minify_translations = HelperProgram('minify-translations')
node = HelperProgram('node')
pylint = HelperProgram('pylint')
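# Sketch of intended usage (assuming the wrapped script exposes a main() function,
# which is an assumption here): other build scripts can call, for example,
# `libdot.pylint.main(argv)` in-process instead of spawning a separate Python
# interpreter for libdot/bin/pylint.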
``` |
{
"source": "1jsebastian/gimme-aws-creds",
"score": 3
} |
#### File: gimme-aws-creds/tests/test_registered_authenticators.py
```python
import json
import os
import unittest
from gimme_aws_creds.registered_authenticators import RegisteredAuthenticators, RegisteredAuthenticator
from tests.user_interface_mock import MockUserInterface
class TestConfig(unittest.TestCase):
"""Class to test RegisteredAuthenticators Class."""
def setUp(self):
"""Set up for the unit tests"""
ui_obj = MockUserInterface()
self.registered_authenticators = RegisteredAuthenticators(ui_obj)
self.file_path = self.registered_authenticators._json_path
def test_file_creation_post_init(self):
assert os.path.exists(self.file_path)
def test_add_authenticator_sanity(self):
cred_id, user = b'my-credential-id', '<PASSWORD>'
self.registered_authenticators.add_authenticator(cred_id, user)
with open(self.file_path) as f:
data = json.load(f)
assert len(data) == 1
assert type(data) == list
assert type(data[0]) == dict
authenticator = RegisteredAuthenticator(**data[0])
assert authenticator.user == user
def test_get_authenticator_user_sanity(self):
cred_id, user = b'my-credential-id', 'my-<PASSWORD>'
self.registered_authenticators.add_authenticator(cred_id, user)
authenticator_user = self.registered_authenticators.get_authenticator_user(cred_id)
assert authenticator_user == user
``` |
{
"source": "1jsingh/rl_bipedal",
"score": 2
} |
#### File: deep_rl/network/network_heads.py
```python
from .network_utils import *
from .network_bodies import *
class ActorCriticNet(nn.Module):
def __init__(self, state_dim, action_dim, phi_body, actor_body, critic_body):
super(ActorCriticNet, self).__init__()
if phi_body is None: phi_body = DummyBody(state_dim)
if actor_body is None: actor_body = DummyBody(phi_body.feature_dim)
if critic_body is None: critic_body = DummyBody(phi_body.feature_dim)
self.phi_body = phi_body
self.actor_body = actor_body
self.critic_body = critic_body
self.fc_action = layer_init(nn.Linear(actor_body.feature_dim, action_dim), 1e-3)
self.fc_critic = layer_init(nn.Linear(critic_body.feature_dim, 1), 1e-3)
self.actor_params = list(self.actor_body.parameters()) + list(self.fc_action.parameters())
self.critic_params = list(self.critic_body.parameters()) + list(self.fc_critic.parameters())
self.phi_params = list(self.phi_body.parameters())
class GaussianActorCriticNet(nn.Module, BaseNet):
def __init__(self,
state_dim,
action_dim,
phi_body=None,
actor_body=None,
critic_body=None):
super(GaussianActorCriticNet, self).__init__()
self.network = ActorCriticNet(state_dim, action_dim, phi_body, actor_body, critic_body)
self.std = nn.Parameter(torch.zeros(action_dim))
self.to(Config.DEVICE)
def forward(self, obs, action=None):
obs = tensor(obs)
phi = self.network.phi_body(obs)
phi_a = self.network.actor_body(phi)
phi_v = self.network.critic_body(phi)
mean = F.tanh(self.network.fc_action(phi_a))
v = self.network.fc_critic(phi_v)
dist = torch.distributions.Normal(mean, F.softplus(self.std))
if action is None:
action = dist.sample()
log_prob = dist.log_prob(action).sum(-1).unsqueeze(-1)
entropy = dist.entropy().sum(-1).unsqueeze(-1)
return {'a': action,
'log_pi_a': log_prob,
'ent': entropy,
'mean': mean,
'v': v}
```
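The forward pass above combines a tanh-squashed action mean with a state-independent learned std. Below is a minimal, self-contained sketch of that same pattern; the layer sizes and the tiny body network are illustrative assumptions and are independent of the repository's `network_utils`/`network_bodies` helpers, whose internals are not shown here.

```python
# Minimal sketch of a Gaussian actor head with a learned, state-independent std.
import torch
import torch.nn as nn
import torch.nn.functional as F


class TinyGaussianActor(nn.Module):
    def __init__(self, state_dim: int, action_dim: int, hidden: int = 64):
        super().__init__()
        self.body = nn.Sequential(nn.Linear(state_dim, hidden), nn.ReLU())
        self.fc_mean = nn.Linear(hidden, action_dim)
        self.std = nn.Parameter(torch.zeros(action_dim))  # one std per action dimension

    def forward(self, obs: torch.Tensor, action: torch.Tensor = None):
        phi = self.body(obs)
        mean = torch.tanh(self.fc_mean(phi))
        dist = torch.distributions.Normal(mean, F.softplus(self.std))
        if action is None:
            action = dist.sample()
        log_prob = dist.log_prob(action).sum(-1, keepdim=True)
        return action, log_prob


if __name__ == "__main__":
    actor = TinyGaussianActor(state_dim=24, action_dim=4)
    a, logp = actor(torch.randn(8, 24))
    print(a.shape, logp.shape)  # torch.Size([8, 4]) torch.Size([8, 1])
```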
#### File: rl_bipedal/ppo/utils.py
```python
from parallelEnv import parallelEnv
import matplotlib
import matplotlib.pyplot as plt
import torch
import numpy as np
from JSAnimation.IPython_display import display_animation
from matplotlib import animation
from IPython.display import display
import random as rand
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# collect trajectories for a parallelized parallelEnv object
def collect_trajectories(envs, policy, tmax=200, nrand=5):
# number of parallel instances
n=len(envs.ps)
#initialize returning lists and start the game!
state_list=[]
reward_list=[]
action_list=[]
states = envs.reset()
for t in range(tmax):
# probs will only be used as the pi_old
# no gradient propagation is needed
# so we move it to the cpu
actions = policy(states).squeeze().cpu().detach().numpy()
next_states,rewards,dones,_ = envs.step(actions)
# store the result
state_list.append(states)
reward_list.append(rewards)
        action_list.append(actions)
        # stop if any of the trajectories is done
        # we want all the lists to be rectangular
if np.any(dones):
break
# return pi_theta, states, actions, rewards, probability
return state_list, action_list, reward_list
def clipped_surrogate(policy, old_actions, states, actions, rewards,
discount=0.995,
epsilon=0.1, beta=0.01):
discount = discount**np.arange(len(rewards))
rewards = np.asarray(rewards)*discount[:,np.newaxis]
# convert rewards to future rewards
rewards_future = rewards[::-1].cumsum(axis=0)[::-1]
mean = np.mean(rewards_future, axis=1)
std = np.std(rewards_future, axis=1) + 1.0e-10
rewards_normalized = (rewards_future - mean[:,np.newaxis])/std[:,np.newaxis]
# convert everything into pytorch tensors and move to gpu if available
    actions = torch.tensor(actions, dtype=torch.float, device=device)
    old_actions = torch.tensor(old_actions, dtype=torch.float, device=device)
    rewards = torch.tensor(rewards_normalized, dtype=torch.float, device=device)
    # re-evaluate the current policy on the stored states to get the new outputs
    new_actions = policy(states)
    # ratio for clipping
    ratio = new_actions/old_actions
    # clipped function
    clip = torch.clamp(ratio, 1-epsilon, 1+epsilon)
    clipped_surrogate = torch.min(ratio*rewards, clip*rewards)
    # include a regularization term
    # this steers new_policy towards 0.5
    # add in 1.e-10 to avoid log(0) which gives nan
    entropy = -(new_actions*torch.log(old_actions+1.e-10)+ \
        (1.0-new_actions)*torch.log(1.0-old_actions+1.e-10))
# this returns an average of all the entries of the tensor
# effective computing L_sur^clip / T
# averaged over time-step and number of trajectories
# this is desirable because we have normalized our rewards
return torch.mean(clipped_surrogate + beta*entropy)
``` |
{
"source": "1jsingh/sudoku_solver",
"score": 4
} |
#### File: 1jsingh/sudoku_solver/solution.py
```python
from utils import *
from collections import defaultdict
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
# TODO: Update the unit list to add the new diagonal units
diagonal_units = [[row_units[i][i] for i in range(9)],[row_units[i][9-i-1] for i in range(9)]]
unitlist = unitlist + diagonal_units
# Must be called after all units (including diagonals) are added to the unitlist
units = extract_units(unitlist, boxes)
peers = extract_peers(units, boxes)
# print(peers)
def naked_twins(values):
"""Eliminate values using the naked twins strategy.
The naked twins strategy says that if you have two or more unallocated boxes
in a unit and there are only two digits that can go in those two boxes, then
those two digits can be eliminated from the possible assignments of all other
boxes in the same unit.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with the naked twins eliminated from peers
Notes
-----
Your solution can either process all pairs of naked twins from the input once,
or it can continue processing pairs of naked twins until there are no such
pairs remaining -- the project assistant test suite will accept either
convention. However, it will not accept code that does not process all pairs
of naked twins from the original input. (For example, if you start processing
pairs of twins and eliminate another pair of twins before the second pair
is processed then your code will fail the PA test suite.)
The first convention is preferred for consistency with the other strategies,
and because it is simpler (since the reduce_puzzle function already calls this
strategy repeatedly).
See Also
--------
Pseudocode for this algorithm on github:
https://github.com/udacity/artificial-intelligence/blob/master/Projects/1_Sudoku/pseudocode.md
"""
# TODO: Implement this function!
twins = defaultdict(list)
out = values.copy()
for box in boxes:
if len(values[box])==2:
twins[values[box]].append(box)
for key in twins.keys():
# print (key)
twin_pair_list = twins[key]
if len(twin_pair_list)>=2:
for i in range(len(twin_pair_list)-1):
peer_list = peers[twin_pair_list[i]]
# unit_list = list(map(tuple,units[twin_pair_list[i]]))
for j in range(i+1,len(twin_pair_list)):
if twin_pair_list[j] in peer_list:
peer_list_check = peers[twin_pair_list[j]]
common_unit = list(set(peer_list).intersection(peer_list_check))
for unit_box in common_unit:
if unit_box not in [twin_pair_list[i],twin_pair_list[j]]:
digits = key
for digit in digits:
#out[unit_box] = out[unit_box].replace(digit,'')
out = assign_value(out,unit_box,out[unit_box].replace(digit,''))
return out
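# Example of the strategy: if values['A1'] == values['A2'] == '23', then the digits
# '2' and '3' can be removed from every other box that shares a unit with both A1 and A2.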
def eliminate(values):
"""Apply the eliminate strategy to a Sudoku puzzle
The eliminate strategy says that if a box has a value assigned, then none
of the peers of that box can have the same value.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with the assigned values eliminated from peers
"""
# TODO: Copy your code from the classroom to complete this function
solved_values = [box for box in values.keys() if len(values[box]) == 1]
for box in solved_values:
digit = values[box]
for peer in peers[box]:
#values[peer] = values[peer].replace(digit,'')
values = assign_value(values,peer,values[peer].replace(digit,''))
return values
def only_choice(values):
"""Apply the only choice strategy to a Sudoku puzzle
The only choice strategy says that if only one box in a unit allows a certain
digit, then that box must be assigned that digit.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict
The values dictionary with all single-valued boxes assigned
Notes
-----
You should be able to complete this function by copying your code from the classroom
"""
# TODO: Copy your code from the classroom to complete this function
for unit in unitlist:
for digit in '123456789':
dplaces = [box for box in unit if digit in values[box]]
if len(dplaces) == 1:
#values[dplaces[0]] = digit
values = assign_value(values,dplaces[0],digit)
return values
def reduce_puzzle(values):
"""Reduce a Sudoku puzzle by repeatedly applying all constraint strategies
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict or False
The values dictionary after continued application of the constraint strategies
no longer produces any changes, or False if the puzzle is unsolvable
"""
# TODO: Copy your code from the classroom and modify it to complete this function
solved_values = [box for box in values.keys() if len(values[box]) == 1]
stalled = False
while not stalled:
solved_values_before = len([box for box in values.keys() if len(values[box]) == 1])
values = eliminate(values)
values = only_choice(values)
values = naked_twins(values)
solved_values_after = len([box for box in values.keys() if len(values[box]) == 1])
stalled = solved_values_before == solved_values_after
if len([box for box in values.keys() if len(values[box]) == 0]):
return False
return values
def search(values):
"""Apply depth first search to solve Sudoku puzzles in order to solve puzzles
that cannot be solved by repeated reduction alone.
Parameters
----------
values(dict)
a dictionary of the form {'box_name': '123456789', ...}
Returns
-------
dict or False
The values dictionary with all boxes assigned or False
Notes
-----
You should be able to complete this function by copying your code from the classroom
and extending it to call the naked twins strategy.
"""
# TODO: Copy your code from the classroom to complete this function
values = reduce_puzzle(values)
if values is False:
return False ## Failed earlier
if all(len(values[s]) == 1 for s in boxes):
return values ## Solved!
# Choose one of the unfilled squares with the fewest possibilities
n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)
# Now use recurrence to solve each one of the resulting sudokus, and
for value in values[s]:
new_sudoku = values.copy()
new_sudoku[s] = value
attempt = search(new_sudoku)
if attempt:
return attempt
def solve(grid):
"""Find the solution to a Sudoku puzzle using search and constraint propagation
Parameters
----------
grid(string)
a string representing a sudoku grid.
Ex. '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
Returns
-------
dict or False
The dictionary representation of the final sudoku grid or False if no solution exists.
"""
values = grid2values(grid)
values = search(values)
return values
if __name__ == "__main__":
diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
display(grid2values(diag_sudoku_grid))
result = solve(diag_sudoku_grid)
display(result)
try:
import PySudoku
PySudoku.play(grid2values(diag_sudoku_grid), result, history)
except SystemExit:
pass
except:
print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
``` |
{
"source": "1K5KKKKKei/sensorutils",
"score": 4
} |
#### File: src/sensorutils/metrics.py
```python
import typing
import numpy as np
def mae(true:np.ndarray, pred:np.ndarray, axis:typing.Optional[int]=None) -> typing.Union[float, np.ndarray]:
"""Calc Mean Absolute Error.
$$
\\frac{1}{N}\sum_{i=0}^{N} |\hat{y}_i - y_i|
$$
Parameters
----------
true: np.ndarray
true data.
pred: np.ndarray
predicted data.
Returns
-------
:Union[float, np.ndarray]
MAE
Examples
--------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> mae(a, b)
>>> mae(a, b, axis=2)
"""
return np.abs(true - pred).mean(axis=axis)
def mape(true:np.ndarray, pred:np.ndarray, axis:typing.Optional[int]=None) -> typing.Union[float, np.ndarray]:
"""Calc Mean Absolute Persentage Error.
$$
\\frac{100}{N}\sum_{i=0}^{N} \\left| \\frac{\hat{y}_i - y_i}{y_i} \\right|
$$
Parameters
----------
true: np.ndarray
true data.
pred: np.ndarray
predicted data.
Returns
-------
:Union[float, np.ndarray]
MAPE
Examples
--------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> mape(a, b)
>>> mape(a, b, axis=2)
"""
return mae(np.ones_like(true), pred / true, axis) * 100
def mse(true:np.ndarray, pred:np.ndarray, axis:typing.Optional[int]=None) -> typing.Union[float, np.ndarray]:
"""Calc Mean Squared Error.
$$
    \\frac{1}{N}\sum_{i=0}^{N} (\hat{y}_i - y_i)^2
$$
Parameters
----------
true: np.ndarray
true data.
pred: np.ndarray
predicted data.
Returns
-------
:Union[float, np.ndarray]
MSE
Examples
--------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> mse(a, b)
>>> mse(a, b, axis=2)
"""
return (np.square(true - pred)).mean(axis=axis)
def rmse(true:np.ndarray, pred:np.ndarray, axis:typing.Optional[int]=None) -> typing.Union[float, np.ndarray]:
"""Calc Root Mean Squared Error.
$$
\\left(\\frac{1}{N}\sum_{i=0}^{N} (\hat{y}_i - y_i)^2 \\right)^{\\frac{1}{2}}
$$
Parameters
----------
true: np.ndarray
true data.
pred: np.ndarray
predicted data.
Returns
-------
:Union[float, np.ndarray]
RMSE
Examples
--------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> rmse(a, b)
>>> rmse(a, b, axis=2)
"""
return np.sqrt(mse(true, pred, axis))
def rmspe(true:np.ndarray, pred:np.ndarray, axis:typing.Optional[int]=None) -> typing.Union[float, np.ndarray]:
"""Calc Root Mean Squared Persentage Error.
$$
100 \\left(\\frac{1}{N}\sum_{i=0}^{N} (\\frac{\hat{y}_i - y_i}{y_i})^2 \\right)^{\\frac{1}{2}}
$$
Parameters
----------
true: np.ndarray
true data.
pred: np.ndarray
predicted data.
Returns
-------
:Union[float, np.ndarray]
RMSPE
Example
-------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> rmspe(a, b)
>>> rmspe(a, b, axis=2)
"""
return rmse(np.ones_like(true), pred / true, axis) * 100
def rmsle(true:np.ndarray, pred:np.ndarray, axis:typing.Optional[int]=None) -> typing.Union[float, np.ndarray]:
"""Calc root mean squared logarithmic error.
$$
\\left(\\frac{1}{N}\sum_{i=0}^{N} (\log (\hat{y}_i + 1) - \log (y_i + 1))^2 \\right)^{\\frac{1}{2}}
$$
Parameters
----------
true: np.ndarray
clean data
pred: np.ndarray
with noise
axis: Optional[int], default=None
mean axis
Returns
-------
:Union[float, np.ndarray]
RMSLE
Examples
--------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> rmsle(a, b)
>>> rmsle(a, b, axis=2)
"""
return rmse(np.log(true + 1), np.log(pred + 1), axis=axis)
def r2(true:np.ndarray, pred:np.ndarray) -> float:
"""Calc r2 score(coefficient of determination).
$$
{R^{2}}( \hat{y} ) := 1 - \\frac{ \\frac{1}{N} \sum_{i=1}^{N} { ( {y}_i - \hat{y}_{i} ) }^{2} }{ \\frac{1}{N} \sum_{i=1}^{N} { ( {y}_i - \\bar{y}) }^{2} } = 1 - \\frac{M S E(\hat{y})}{Var(y)}
$$
Parameters
----------
true: np.ndarray
clean data
pred: np.ndarray
with noise
Returns
-------
:float
coefficient of determination
Examples
--------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> r2(a, b)
"""
return 1 - (mse(true, pred) / np.var(true))
def snr(true:np.ndarray, pred:np.ndarray, axis:typing.Optional[int]=None) -> typing.Union[float, np.ndarray]:
"""Calc Signal to Noise Ratio.
$$
10 \log_{10} \\left(\\frac{\sum_{i=0}^{N}true_i^2}{\sum_{i=0}^{N}(true_i - pred_i)^2} \\right)
$$
Parameters
----------
true: np.ndarray
clean data
pred: np.ndarray
with noise
axis: Optional[int]
mean axis (default=None)
Returns
-------
:Union[float, np.ndarray]
SNR
Examples
--------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> snr(a, b)
>>> snr(a, b, axis=2)
"""
assert true.shape == pred.shape, 'true.shape ({}) == pred.shape ({})'.format(true.shape, pred.shape)
noise_mse = (np.square(true - pred)).sum(axis=axis)
signal_ms = (np.square(true)).sum(axis=axis)
return 10 * np.log10((signal_ms / noise_mse))
def lsd(true_spec:np.ndarray, pred_spec:np.ndarray, axis:typing.Optional[int]=None) -> typing.Union[float, np.ndarray]:
"""Calc Log Spectral Distance.
$$
\mathrm{LSD}(S(\omega),\\tilde{S}(\omega)) =
\sqrt{\\frac{1}{W}\sum_{\omega}^{W} \\left(20\log_{10}\\left|\\frac{S(\omega)}{\\tilde{S}(\omega)}\\right|\\right)^2}
$$
    $S(\omega)$ and $\\tilde{S}(\omega)$ are the log spectra of the original waveform and the noise-suppressed waveform, respectively.
    For multiple short-time spectra, compute the distance for each spectrum and then take the mean.
Parameters
----------
true_spec: np.ndarray
spectral 1
pred_spec: np.ndarray
spectral 2
axis: Optional[int]
Not use
Returns
-------
: Union[float, np.ndarray]
LSD
Examples
--------
>>> a = np.random.randn(2, 3, 4)
>>> b = np.random.randn(2, 3, 4)
>>> lsd(a, b).shape
"""
    return np.sqrt(np.mean(np.square(20 * np.log10(np.abs(true_spec / pred_spec)))))
```
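A short usage sketch for the metrics above; the arrays are dummy data and the shapes are arbitrary assumptions.

```python
# Illustrative only: dummy arrays standing in for true and reconstructed sensor windows.
import numpy as np
from sensorutils.metrics import mae, rmse, snr

rng = np.random.default_rng(0)
true = rng.normal(size=(16, 3, 256))           # e.g. 16 windows, 3 axes, 256 samples
pred = true + 0.1 * rng.normal(size=true.shape)

print(mae(true, pred))            # single float over all elements
print(rmse(true, pred, axis=2))   # per-window, per-axis RMSE, shape (16, 3)
print(snr(true, pred))            # overall signal-to-noise ratio in dB
```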
#### File: sensorutils/tests/test_hasc_loader.py
```python
import sys
import unittest
import numpy as np
import pandas as pd
from pathlib import Path
sys.path.append('../src/')
from sensorutils.datasets.hasc import HASC, load, load_raw, load_meta, reformat
class HASCTest(unittest.TestCase):
path = None
cache_path = None
@classmethod
def setUpClass(cls) -> None:
if cls.path is None:
raise RuntimeError('dataset path is not specified')
if cls.cache_path is None:
raise RuntimeError('dataset cache path is not specified')
def setUp(self):
self.loader = HASC(self.path, self.cache_path)
def tearDown(self):
pass
@classmethod
def _gen_small_meta(cls, meta):
# filed_meta = meta.query('Frequency == 100 & Person in ["person01068", "person03053", "person02033", "person01106", "person03079"] & Height > 170')
filed_meta = meta.query('Person in ["person01068", "person03053", "person02033", "person01106", "person03079"]')
return filed_meta
def test_load_fn(self):
meta_src = self._gen_small_meta(self.loader.meta.copy())
data, meta = load(self.path, meta=meta_src)
# data
## type check
self.assertIsInstance(data, list)
self.assertTrue(all(isinstance(d, pd.DataFrame) for d in data))
# meta
## type check
self.assertIsInstance(meta, pd.DataFrame)
self.assertEqual(len(data), len(meta))
## data check
self.assertTrue(meta_src.equals(meta))
def test_load_raw_fn_base(self):
meta_src = self._gen_small_meta(self.loader.meta.copy())
raw = load_raw(self.path, meta=meta_src)
# raw
## type check
self.assertIsInstance(raw, tuple)
## shape check
self.assertEqual(len(raw), 2)
data, meta = raw
# data
## type check
self.assertIsInstance(data, list)
self.assertTrue(all(isinstance(d, pd.DataFrame) for d in data))
# meta
## type check
self.assertIsInstance(meta, pd.DataFrame)
self.assertEqual(len(data), len(meta))
## data check
self.assertTrue(meta_src.equals(meta))
@unittest.skip
def test_load_meta_fn(self):
meta = load_meta(self.path)
self.assertIsInstance(meta, pd.DataFrame)
@unittest.skip
def test_reformat_fn(self):
data, meta = load_raw(self.path, meta=self._gen_small_meta(self.loader.meta.copy()))
data, _ = reformat(data, meta)
# data
## type check
self.assertIsInstance(data, list)
self.assertTrue(all(isinstance(d, pd.DataFrame) for d in data))
# meta
## type check
self.assertIsInstance(meta, pd.DataFrame)
## compare between data and meta
self.assertEqual(len(data), len(meta))
@unittest.skip
def test_hasc_meta_map(self):
_ = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, y_labels=['activity', 'frequency', 'gender', 'height', 'weight', 'person'])
...
def test_hasc_load_method_base(self):
# subjects = ['person01068', 'person03053', 'person02033', 'person01106', 'person03079']
# queries = {'Person': 'Person in {}'.format(subjects)}
queries = None
y_labels = ['activity', 'person']
_, _, label_map = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, queries=queries, y_labels=y_labels)
## type check
self.assertIsInstance(label_map, dict)
## data check
        self.assertSetEqual(set(y_labels), set(label_map.keys()))
for k in label_map:
M = label_map[k]
if bool(M):
with self.subTest(f'key of label map: {k}'):
self.assertSetEqual(set(M.values()), set(range(min(list(M.values())), max(list(M.values()))+1)))
def test_hasc_load_method_framing(self):
subjects = ['person01068', 'person03053', 'person02033', 'person01106', 'person03079']
queries = {'Person': 'Person in {}'.format(subjects)}
y_labels = ['activity', 'person']
## shape check
for ws in [128, 256, 512]:
x, _, _= self.loader.load(window_size=ws, stride=ws, ftrim=5, btrim=5, queries=queries, y_labels=y_labels)
with self.subTest(f'window_size: {ws}'):
self.assertTupleEqual(x.shape[1:], (3, ws))
def test_hasc_load_method_ylabels(self):
subjects = ['person01068', 'person03053', 'person02033', 'person01106', 'person03079']
queries = {'Person': 'Person in {}'.format(subjects)}
y_labels = ['activity', 'frequency', 'gender', 'height', 'weight', 'person']
for n in range(1, len(y_labels)):
with self.subTest(f'number of y labels: {n}'):
Y = np.random.choice(y_labels, n)
_, y, label_map = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, queries=queries, y_labels=Y)
## type check
self.assertIsInstance(label_map, dict)
## data check
                self.assertSetEqual(set(y_labels), set(label_map.keys()))
## shape check
self.assertEqual(y.shape[1], n)
def test_hasc_load_method_filed_meta(self):
subjects = ['person01068', 'person03053', 'person02033', 'person01106', 'person03079', 'person02007', 'person01085', 'person01060', 'person01103', 'person03032', 'person01107', 'person01045', 'person02063', 'person03055', 'person01066', 'person03001', 'person01039', 'person01113', 'person03034', 'person03056', 'person02100', 'person01087', 'person01089', 'person01109', 'person01017', 'person01063', 'person01098', 'person03038', 'person02012', 'person01097', 'person03036', 'person03033', 'person01078', 'person02068', 'person03076', 'person01040', 'person02024', 'person01073', 'person02040']
with self.subTest('filtered by activity'):
queries = {'Person': 'Person in {}'.format(subjects)}
_, y, label_map = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, queries=queries, y_labels=['activity', 'frequency', 'gender', 'height', 'weight', 'person'])
# type check
self.assertIsInstance(label_map, dict)
# data check
self.assertSetEqual(set(label_map['activity'].keys()), set(['1_stay', '2_walk', '3_jog', '4_skip', '5_stUp', '6_stDown']))
self.assertEqual(y[:, 0].min(), 0)
self.assertEqual(y[:, 0].max(), 5)
g = 'male'
with self.subTest(f'filtered by gender(gender == "{g}")'):
queries = {'Gender': f'Gender == "{g}"', 'Person': f'Person in {subjects}'}
_, y, label_map = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, queries=queries, y_labels=['activity', 'frequency', 'gender', 'height', 'weight', 'person'])
self.assertIsInstance(label_map['gender'], dict)
self.assertTrue(np.all(y[:, 2] == label_map['gender'][g]))
h = 170
with self.subTest(f'filtered by height (height <= {h})'):
queries = {'Height': f'Height <= {h}', 'Person': f'Person in {subjects}'}
_, y, label_map = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, queries=queries, y_labels=['activity', 'frequency', 'gender', 'height', 'weight', 'person'])
self.assertDictEqual(label_map['height'], {})
self.assertTrue(np.all(y[:, 3] <= h))
w = 80
with self.subTest(f'filtered by weight (weight >= {w})'):
queries = {'Weight': f'Weight >= {w}', 'Person': f'Person in {subjects}'}
_, y, label_map = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, queries=queries, y_labels=['activity', 'frequency', 'gender', 'height', 'weight', 'person'])
self.assertDictEqual(label_map['height'], {})
self.assertTrue(np.all(y[:, 4] >= w))
freq = 50
with self.subTest(f'filtered by frequency (frequency == {freq})'):
queries = {'Frequency': f'Frequency == {freq}', 'Person': f'Person in {subjects}'}
_, y, label_map = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, queries=queries, y_labels=['activity', 'frequency', 'gender', 'height', 'weight', 'person'])
self.assertDictEqual(label_map['height'], {})
self.assertTrue(np.all(y[:, 1] == freq))
with self.subTest('filtered by subject'):
queries = {'Person': f'Person in {subjects}'}
_, y, label_map = self.loader.load(window_size=256, stride=256, ftrim=5, btrim=5, queries=queries, y_labels=['activity', 'frequency', 'gender', 'height', 'weight', 'person'])
self.assertIsInstance(label_map, dict)
self.assertSetEqual(set(label_map['person'].keys()), set(subjects))
# self.assertEqual(y[:, 5].min(), 0)
# self.assertEqual(y[:, 5].max(), len(subjects)-1)
self.assertSetEqual(set(subjects), set(label_map['person'].keys()))
self.assertSetEqual(set([
1068, 3053, 2033, 1106, 3079,
2007, 1085, 1060, 1103, 3032,
1107, 1045, 2063, 3055, 1066,
3001, 1039, 1113, 3034, 3056,
2100, 1087, 1089, 1109, 1017,
1063, 1098, 3038, 2012, 1097,
3036, 3033, 1078, 2068, 3076,
1040, 2024, 1073, 2040
]), set(label_map['person'].values()))
# self.assertIsInstance(raw, tuple)
# self.assertEqual(len(raw), 2)
# data, meta = raw
# self.assertIsInstance(data, list)
# self.assertTrue(all(isinstance(d, pd.DataFrame) for d in data))
# self.assertIsInstance(meta, pd.DataFrame)
# self.assertEqual(len(data), len(meta))
if __name__ == '__main__':
args = sys.argv
if len(args) != 3:
sys.stderr.write('Usage: {} <dataset path> <cache path>'.format(args[0]))
sys.exit(1)
ds_path = Path(args[1])
cache_path = Path(args[2])
HASCTest.path = ds_path
HASCTest.cache_path = cache_path
unittest.main(verbosity=2, argv=args[0:1])
``` |
{
"source": "1-kane/Internship-Project",
"score": 4
} |
#### File: 1-kane/Internship-Project/python project.py
```python
from nltk import *
from nltk.corpus import *
def lang_ratio(input):
lang_ratio={}
tokens = wordpunct_tokenize(input)
words = [word.lower() for word in tokens]
for language in stopwords.fileids():
stopwords_set = set(stopwords.words(language))
words_set = set(words)
common_elements = words_set.intersection(stopwords_set)
lang_ratio[language] = len(common_elements)
return lang_ratio
def detect_language(input):
ratios = lang_ratio(input)
lang = max(ratios, key = ratios.get)
return lang
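# Example: detect_language("this is a small test of the language detector") is
# expected to return 'english', because the English stopword list shares the most
# words with the input.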
ans = 'Y'
while((ans=='y')|(ans=='Y')):
input1 = input("Write a scentence")
lang = detect_language(input1)
print(input1+"\t Langauge: "+ lang)
ans = input("to do this again enter (y/Y)")
``` |
{
"source": "1kastner/conflowgen",
"score": 2
} |
#### File: conflowgen/api/container_flow_generation_manager.py
```python
import datetime
import logging
from typing import Union, Dict, Optional
from conflowgen.application.repositories.container_flow_generation_properties_repository import \
ContainerFlowGenerationPropertiesRepository
from conflowgen.flow_generator.container_flow_generation_service import \
ContainerFlowGenerationService
class ContainerFlowGenerationManager:
"""
This manager provides the interface to set the properties (i.e., not the distributions that are handled elsewhere)
and trigger the synthetic container flow generation.
If not provided, for many of these values `default values <notebooks/input_distributions.ipynb#Default-Values>`_
exist.
"""
def __init__(self):
self.container_flow_generation_service = ContainerFlowGenerationService()
self.container_flow_generation_properties_repository = ContainerFlowGenerationPropertiesRepository()
self.logger = logging.getLogger("conflowgen")
def set_properties(
self,
start_date: datetime.date,
end_date: datetime.date,
name: Optional[str] = None,
minimum_dwell_time_of_import_containers_in_hours: Optional[int] = None,
maximum_dwell_time_of_import_containers_in_hours: Optional[int] = None,
minimum_dwell_time_of_export_containers_in_hours: Optional[int] = None,
maximum_dwell_time_of_export_containers_in_hours: Optional[int] = None,
minimum_dwell_time_of_transshipment_containers_in_hours: Optional[int] = None,
maximum_dwell_time_of_transshipment_containers_in_hours: Optional[int] = None,
transportation_buffer: Optional[float] = None
) -> None:
"""
Args:
start_date: The earliest day any scheduled vehicle arrives. Trucks that drop off containers might arrive
earlier though.
end_date: The latest day any scheduled vehicle arrives. Trucks that pick up containers might arrive later
though.
name: The name of the generated synthetic container flow which helps to distinguish different scenarios.
minimum_dwell_time_of_import_containers_in_hours: No vehicle arrives earlier than this amount of hours
to pick up an import container that has previously been dropped off.
            maximum_dwell_time_of_import_containers_in_hours: No vehicle arrives later than this amount of hours after
the previous vehicle which has dropped off the import container has arrived.
minimum_dwell_time_of_export_containers_in_hours: No vehicle arrives earlier than this amount of hours
to pick up an export container that has previously been dropped off.
            maximum_dwell_time_of_export_containers_in_hours: No vehicle arrives later than this amount of hours after
the previous vehicle which has dropped off the export container has arrived.
minimum_dwell_time_of_transshipment_containers_in_hours: No vehicle arrives earlier than this amount of
hours to pick up a transshipment container that has previously been dropped off.
            maximum_dwell_time_of_transshipment_containers_in_hours: No vehicle arrives later than this amount of hours
after the previous vehicle which has dropped off the transshipment container has arrived.
transportation_buffer: Determines how many percent more of the inbound journey capacity is used at most to
transport containers on the outbound journey.
"""
properties = self.container_flow_generation_properties_repository.get_container_flow_generation_properties()
if name is not None:
properties.name = name
properties.start_date = start_date
properties.end_date = end_date
if minimum_dwell_time_of_import_containers_in_hours is not None:
properties.minimum_dwell_time_of_import_containers_in_hours = \
minimum_dwell_time_of_import_containers_in_hours
if maximum_dwell_time_of_import_containers_in_hours is not None:
properties.maximum_dwell_time_of_import_containers_in_hours = \
maximum_dwell_time_of_import_containers_in_hours
if minimum_dwell_time_of_export_containers_in_hours is not None:
properties.minimum_dwell_time_of_export_containers_in_hours = \
minimum_dwell_time_of_export_containers_in_hours
if maximum_dwell_time_of_export_containers_in_hours is not None:
properties.maximum_dwell_time_of_export_containers_in_hours = \
maximum_dwell_time_of_export_containers_in_hours
if minimum_dwell_time_of_transshipment_containers_in_hours is not None:
properties.minimum_dwell_time_of_transshipment_containers_in_hours = \
minimum_dwell_time_of_transshipment_containers_in_hours
if maximum_dwell_time_of_transshipment_containers_in_hours is not None:
properties.maximum_dwell_time_of_transshipment_containers_in_hours = \
maximum_dwell_time_of_transshipment_containers_in_hours
if transportation_buffer is not None:
properties.transportation_buffer = transportation_buffer
self.container_flow_generation_properties_repository.set_container_flow_generation_properties(
properties
)
def get_properties(self) -> Dict[str, Union[str, datetime.date, float, int]]:
"""
Returns:
The properties of the container flow.
"""
properties = self.container_flow_generation_properties_repository.get_container_flow_generation_properties()
return {
'name': properties.name,
'start_date': properties.start_date,
'end_date': properties.end_date,
'transportation_buffer': properties.transportation_buffer,
'minimum_dwell_time_of_import_containers_in_hours':
properties.minimum_dwell_time_of_import_containers_in_hours,
'minimum_dwell_time_of_export_containers_in_hours':
properties.minimum_dwell_time_of_export_containers_in_hours,
'minimum_dwell_time_of_transshipment_containers_in_hours':
properties.minimum_dwell_time_of_transshipment_containers_in_hours,
'maximum_dwell_time_of_import_containers_in_hours':
properties.maximum_dwell_time_of_import_containers_in_hours,
'maximum_dwell_time_of_export_containers_in_hours':
properties.maximum_dwell_time_of_export_containers_in_hours,
'maximum_dwell_time_of_transshipment_containers_in_hours':
properties.maximum_dwell_time_of_transshipment_containers_in_hours
}
def container_flow_data_exists(self) -> bool:
"""
When an existing database is opened, pre-existing container flow data could already be stored inside.
Invoking :meth:`.ContainerFlowGenerationManager.generate` again would reset that stored data.
        You might want to skip that step and just re-use the data already stored in the database.
Returns:
Whether container flow data exists in the database.
"""
return self.container_flow_generation_service.container_flow_data_exists()
def generate(self, overwrite: bool = True) -> None:
"""
Generate the synthetic container flow according to all the information stored in the database so far.
This triggers a multistep procedure of generating vehicles and the containers which are delivered or picked up
by the vehicles.
More is described in the Section
`Data Generation Process <background.rst#data-generation-process>`_.
The invocation of this method overwrites any already existent data in the database.
Consider checking for
:meth:`.ContainerFlowGenerationManager.container_flow_data_exists`
and skip invoking this method.
"""
if not overwrite and self.container_flow_data_exists():
self.logger.debug("Data already exists and it was not asked to overwrite existent data, skip this.")
return
self.container_flow_generation_service.generate()
```
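A hypothetical call sequence for the manager above, assuming a ConFlowGen database has already been created and opened elsewhere (that step is not part of this file) and that the package is importable; the dates, scenario name, and dwell-time value are illustrative assumptions.

```python
import datetime

from conflowgen.api.container_flow_generation_manager import ContainerFlowGenerationManager

manager = ContainerFlowGenerationManager()
manager.set_properties(
    start_date=datetime.date(2021, 7, 1),
    end_date=datetime.date(2021, 7, 31),
    name="demo scenario",
    maximum_dwell_time_of_import_containers_in_hours=10 * 24,
)
print(manager.get_properties())

# Re-use previously generated data if it exists instead of regenerating it.
if not manager.container_flow_data_exists():
    manager.generate()
```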
#### File: domain_models/distribution_repositories/__init__.py
```python
import logging
import math
from typing import Dict, Any, Optional
logger = logging.getLogger("conflowgen")
def normalize_distribution_with_no_dependent_variable(
distribution: Dict[Any, float],
context: Optional[Any] = None
) -> Dict[Any, float]:
keys, fractions = zip(*distribution.items())
sum_of_fractions = sum(fractions)
if not math.isclose(sum_of_fractions, 1):
context_text = f"for '{context}' " if context else ""
logger.debug(f"Sum of fractions was not 1 {context_text}and was automatically normalized.")
fractions = [fraction / sum_of_fractions for fraction in fractions]
for fraction in fractions:
assert fraction >= 0
normalized_distribution = dict(zip(keys, fractions))
return normalized_distribution
def normalize_distribution_with_one_dependent_variable(
distributions: Dict[Any, Dict[Any, float]]
) -> Dict[Any, Dict[Any, float]]:
normalized_distributions = {}
for first_level_key, second_level_distribution in distributions.items():
normalized_second_level_distribution = normalize_distribution_with_no_dependent_variable(
second_level_distribution, context=first_level_key
)
normalized_distributions[first_level_key] = normalized_second_level_distribution
return normalized_distributions
```
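A small illustration of what the two normalization helpers above return; the input fractions are arbitrary and the snippet assumes ConFlowGen is importable.

```python
from conflowgen.domain_models.distribution_repositories import (
    normalize_distribution_with_no_dependent_variable,
    normalize_distribution_with_one_dependent_variable,
)

# Fractions that sum to 2 are scaled down so that they sum to 1.
print(normalize_distribution_with_no_dependent_variable({"a": 1.0, "b": 0.5, "c": 0.5}))
# -> {'a': 0.5, 'b': 0.25, 'c': 0.25}

# The nested variant normalizes each inner distribution separately.
print(normalize_distribution_with_one_dependent_variable({"x": {"a": 2, "b": 2}}))
# -> {'x': {'a': 0.5, 'b': 0.5}}
```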
#### File: domain_models/distribution_seeders/container_length_distribution_seeder.py
```python
from conflowgen.domain_models.distribution_repositories.container_length_distribution_repository import \
ContainerLengthDistributionRepository
from conflowgen.domain_models.data_types.container_length import ContainerLength
#: In general, most containerized goods are transported in 20' and 40' sea containers.
#: In Germany in August 2021, only 1% of containerized goods (measured in weight) were transported in a container
#: different from these two standard sizes :cite:p:`destatis.seeschifffahrt.august.2021`.
#: The same statistics says that approximately 30% of the goods (measured in weight again) are transported in 20'
#: containers, and 40' containers make up 67%.
#: For ConFlowGen, however, the fraction in numbers of containers is required instead of the fraction based on weight.
#: In an expert interview it was said that the TEU factor in their case is approximately 1.6 and 45 foot containers made
#: up less than 5%.
#:
#: The numbers used here are inspired by the reported statistics and the expert interview.
#: They are believed to be a reasonable first assumption if no data is available.
DEFAULT_CONTAINER_LENGTH_FREQUENCIES = {
ContainerLength.twenty_feet: 0.4,
ContainerLength.forty_feet: 0.57,
ContainerLength.forty_five_feet: 0.029,
ContainerLength.other: 0.001
}
def seed():
ContainerLengthDistributionRepository().set_distribution(
DEFAULT_CONTAINER_LENGTH_FREQUENCIES
)
```
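As a rough cross-check of the comment block above, the TEU factor implied by the default shares can be computed by hand; the per-length TEU factors used below (1, 2, 2.25, 2.5) are assumptions made for this back-of-the-envelope calculation, not values taken from this file.

```python
# Rough sanity check, not part of the library.
default_shares = {
    "twenty_feet": 0.4,
    "forty_feet": 0.57,
    "forty_five_feet": 0.029,
    "other": 0.001,
}
assumed_teu_factors = {
    "twenty_feet": 1.0,
    "forty_feet": 2.0,
    "forty_five_feet": 2.25,
    "other": 2.5,
}
teu_factor = sum(default_shares[k] * assumed_teu_factors[k] for k in default_shares)
print(round(teu_factor, 3))  # ~1.61, close to the TEU factor of 1.6 reported in the interview
```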
#### File: conflowgen/posthoc_analyses/inbound_to_outbound_vehicle_capacity_utilization_analysis_report.py
```python
from __future__ import annotations
from typing import Tuple, Any, Dict, Iterable, Optional
import matplotlib.pyplot as plt
import matplotlib.ticker
import pandas as pd
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.posthoc_analyses.inbound_to_outbound_vehicle_capacity_utilization_analysis import \
InboundToOutboundVehicleCapacityUtilizationAnalysis, CompleteVehicleIdentifier
from conflowgen.reporting import AbstractReportWithMatplotlib
from conflowgen.reporting.no_data_plot import no_data_graph
class InboundToOutboundVehicleCapacityUtilizationAnalysisReport(AbstractReportWithMatplotlib):
"""
This analysis report takes the data structure as generated by :class:`.InboundToOutboundCapacityUtilizationAnalysis`
and creates a comprehensible representation for the user, either as text or as a graph.
"""
report_description = """
Analyze the used vehicle capacity for each vehicle for the inbound and outbound journeys.
Generally, it expected to reach an equilibrium - each vehicle should approximately pick up as many containers
at the container terminal as it has delivered.
Great disparities between the transported capacities on the inbound and outbound journey are considered noteworthy
but depending on the input data it might be acceptable.
"""
maximum_length_for_readable_name = 50 # doc: Each vehicle has a name that might be a bit lengthy for text output
plot_title = "Capacity utilization analysis"
def __init__(self):
super().__init__()
self.analysis = InboundToOutboundVehicleCapacityUtilizationAnalysis(
transportation_buffer=self.transportation_buffer
)
@classmethod
def _create_readable_name(cls, vehicle_identifier: Tuple[Any]) -> str:
name = "-".join(str(part) for part in vehicle_identifier)
if len(name) > cls.maximum_length_for_readable_name:
name = name[:46] + "..."
return name
def get_report_as_text(self, **kwargs) -> str:
"""
The report as a text is represented as a table suitable for logging. It uses a human-readable formatting style.
Keyword Args:
vehicle_type: Either ``"all"``, a single vehicle of type :class:`.ModeOfTransport` or a whole collection of
vehicle types, e.g. passed as a :class:`list` or :class:`set`.
For the exact interpretation of the parameter, check
:class:`.InboundToOutboundVehicleCapacityUtilizationAnalysis`.
Returns:
The report in text format (possibly spanning over several lines).
"""
vehicle_type, capacities = self._get_capacities_depending_on_vehicle_type(kwargs)
report = "\n"
report += "vehicle type = " + vehicle_type + "\n"
report += "vehicle identifier "
report += "inbound capacity (in TEU) "
report += "outbound capacity (in TEU)"
report += "\n"
for vehicle_identifier, (used_inbound_capacity, used_outbound_capacity) in capacities.items():
vehicle_name = self._create_readable_name(vehicle_identifier)
report += f"{vehicle_name:<50} " # align this with cls.maximum_length_for_readable_name!
report += f"{used_inbound_capacity:>25.1f} "
report += f"{used_outbound_capacity:>26.1f}"
report += "\n"
if len(capacities) == 0:
report += "--no vehicles exist--\n"
else:
report += "(rounding errors might exist)\n"
return report
def get_report_as_graph(self, **kwargs) -> object:
"""
The report as a graph is represented as a scatter plot using pandas.
Keyword Args:
plot_type: Either "absolute", "relative", or "both". Defaults to "both".
vehicle_type: Either ``"all"``, a single vehicle of type :class:`.ModeOfTransport` or a whole collection of
vehicle types, e.g. passed as a :class:`list` or :class:`set`.
For the exact interpretation of the parameter, check
:class:`.InboundToOutboundVehicleCapacityUtilizationAnalysis`.
Returns:
The matplotlib figure
"""
plot_type = kwargs.get("plot_type", "both")
vehicle_type, capacities = self._get_capacities_depending_on_vehicle_type(kwargs)
if len(capacities) == 0:
return no_data_graph()
df = self._convert_analysis_to_df(capacities)
if plot_type == "absolute":
fig, ax = plt.subplots(1, 1)
self._plot_absolute_values(df, vehicle_type, ax=ax)
elif plot_type == "relative":
fig, ax = plt.subplots(1, 1)
self._plot_relative_values(df, vehicle_type, ax=ax)
elif plot_type == "both":
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
self._plot_absolute_values(df, vehicle_type, ax=ax1)
self._plot_relative_values(df, vehicle_type, ax=ax2)
plt.subplots_adjust(wspace=0.4)
else:
raise Exception(f"Plot type '{plot_type}' is not supported.")
plt.legend(
loc='lower left',
bbox_to_anchor=(1, 0),
fancybox=True,
)
return fig
@staticmethod
def _get_vehicle_type_representation(vehicle_type: Any) -> str:
if vehicle_type is None:
return "all"
if isinstance(vehicle_type, ModeOfTransport):
return str(vehicle_type)
if isinstance(vehicle_type, Iterable):
return " & ".join([str(element) for element in vehicle_type])
return str(vehicle_type)
def _plot_absolute_values(
self, df: pd.DataFrame, vehicle_type: str, ax: Optional[matplotlib.pyplot.axis] = None
) -> matplotlib.pyplot.axis:
ax = df.plot.scatter(x="inbound capacity (fixed)", y="used outbound capacity", ax=ax)
slope = 1 + self.transportation_buffer
ax.axline((0, 0), slope=slope, color='black', label='Maximum outbound capacity')
ax.axline((0, 0), slope=1, color='gray', label='Equilibrium')
ax.set_title(self.plot_title + " (absolute),\n vehicle type = " + vehicle_type)
ax.set_aspect('equal', adjustable='box')
ax.grid(color='lightgray', linestyle=':', linewidth=.5)
maximum = df[["inbound capacity (fixed)", "used outbound capacity"]].max(axis=1).max(axis=0)
axis_limitation = maximum * 1.1 # add some white space to the top and left
ax.set_xlim([0, axis_limitation])
ax.set_ylim([0, axis_limitation])
return ax
def _plot_relative_values(
self, df: pd.DataFrame, vehicle_type: str, ax: Optional[matplotlib.pyplot.axis] = None
) -> matplotlib.pyplot.axis:
ax = df.plot.scatter(x="inbound capacity (fixed)", y="ratio", ax=ax)
ax.axline((0, (1 + self.transportation_buffer)), slope=0, color='black', label='Maximum outbound capacity')
ax.axline((0, 1), slope=0, color='gray', label='Equilibrium')
ax.set_title(self.plot_title + " (relative),\n vehicle type = " + vehicle_type)
ax.grid(color='lightgray', linestyle=':', linewidth=.5)
return ax
def _convert_analysis_to_df(self, capacities: Dict[CompleteVehicleIdentifier, Tuple[float, float]]) -> pd.DataFrame:
rows = []
for vehicle_identifier, (inbound_capacity, used_outbound_capacity) in capacities.items():
vehicle_name = self._create_readable_name(vehicle_identifier)
rows.append({
"vehicle name": vehicle_name,
"inbound capacity (fixed)": inbound_capacity,
"used outbound capacity": used_outbound_capacity
})
df = pd.DataFrame(rows)
df["ratio"] = df["used outbound capacity"] / df["inbound capacity (fixed)"]
return df
def _get_capacities_depending_on_vehicle_type(
self, kwargs
) -> Tuple[str, Dict[CompleteVehicleIdentifier, Tuple[float, float]]]:
if "vehicle_type" in kwargs:
vehicle_type = kwargs["vehicle_type"]
capacities = self.analysis.get_inbound_and_outbound_capacity_of_each_vehicle(
vehicle_type=vehicle_type
)
else:
vehicle_type = None
capacities = self.analysis.get_inbound_and_outbound_capacity_of_each_vehicle()
return self._get_vehicle_type_representation(vehicle_type), capacities
```
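A hypothetical way of using the report once synthetic data has been generated; the keyword arguments are the ones documented in the docstrings above, and the output file name is made up.

```python
from conflowgen.posthoc_analyses.inbound_to_outbound_vehicle_capacity_utilization_analysis_report import (
    InboundToOutboundVehicleCapacityUtilizationAnalysisReport,
)
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport

report = InboundToOutboundVehicleCapacityUtilizationAnalysisReport()

# Text table restricted to feeder vessels.
print(report.get_report_as_text(vehicle_type=ModeOfTransport.feeder))

# Scatter plots (absolute and relative) for all vehicle types.
fig = report.get_report_as_graph(plot_type="both")
fig.savefig("capacity_utilization.png")  # illustrative file name
```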
#### File: conflowgen/posthoc_analyses/modal_split_analysis.py
```python
from __future__ import annotations
from typing import Dict
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.posthoc_analyses.abstract_posthoc_analysis import AbstractPostHocAnalysis
from conflowgen.posthoc_analyses.container_flow_by_vehicle_type_analysis import ContainerFlowByVehicleTypeAnalysis
from conflowgen.descriptive_datatypes import TransshipmentAndHinterlandComparison
from conflowgen.descriptive_datatypes import HinterlandModalSplit
class ModalSplitAnalysis(AbstractPostHocAnalysis):
"""
This analysis can be run after the synthetic data has been generated.
The analysis returns a data structure that can be used for generating reports (e.g., in text or as a figure)
as it is the case with :class:`.ModalSplitAnalysisReport`.
"""
vessels_considered_for_transshipment = {
ModeOfTransport.deep_sea_vessel,
ModeOfTransport.feeder
}
vehicles_considered_for_hinterland = {
ModeOfTransport.truck,
ModeOfTransport.barge,
ModeOfTransport.train
}
def __init__(self):
super().__init__()
self.container_flow_by_vehicle_type_analysis = ContainerFlowByVehicleTypeAnalysis()
def get_transshipment_and_hinterland_fraction(self) -> TransshipmentAndHinterlandComparison:
"""
Returns:
The amount of containers in TEU dedicated for or coming from the hinterland versus the amount of containers
in TEU that are transshipped.
"""
inbound_to_outbound_flow = self.container_flow_by_vehicle_type_analysis.get_inbound_to_outbound_flow()
transshipment_capacity = 0
hinterland_capacity = 0
for inbound_vehicle_type in inbound_to_outbound_flow.keys():
for outbound_vehicle_type, capacity in inbound_to_outbound_flow[inbound_vehicle_type].items():
if (inbound_vehicle_type in self.vessels_considered_for_transshipment
and outbound_vehicle_type in self.vessels_considered_for_transshipment):
transshipment_capacity += capacity
else:
hinterland_capacity += capacity
return TransshipmentAndHinterlandComparison(
transshipment_capacity=transshipment_capacity,
hinterland_capacity=hinterland_capacity
)
def get_modal_split_for_hinterland(
self,
inbound: bool,
outbound: bool
) -> HinterlandModalSplit:
"""
Args:
inbound: Whether to account for inbound journeys
outbound: Whether to account for outbound journeys
Returns:
The modal split for the hinterland as generated.
"""
inbound_to_outbound_flow = self.container_flow_by_vehicle_type_analysis.get_inbound_to_outbound_flow()
transported_capacity: Dict[ModeOfTransport, float] = {
ModeOfTransport.truck: 0,
ModeOfTransport.train: 0,
ModeOfTransport.barge: 0
}
if (not inbound) and (not outbound):
raise ValueError("The modal split must cover either the inbound traffic, the outbound traffic, or both")
for inbound_vehicle_type, inbound_capacity in inbound_to_outbound_flow.items():
for outbound_vehicle_type, capacity in inbound_to_outbound_flow[inbound_vehicle_type].items():
if inbound and inbound_vehicle_type in self.vehicles_considered_for_hinterland:
transported_capacity[inbound_vehicle_type] += capacity
if outbound and outbound_vehicle_type in self.vehicles_considered_for_hinterland:
transported_capacity[outbound_vehicle_type] += capacity
return HinterlandModalSplit(
train_capacity=transported_capacity[ModeOfTransport.train],
barge_capacity=transported_capacity[ModeOfTransport.barge],
truck_capacity=transported_capacity[ModeOfTransport.truck]
)
```
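A sketch of turning the returned values into shares, assuming data has been generated so the analysis can run, and assuming the named tuples expose the fields used in their constructors above.

```python
from conflowgen.posthoc_analyses.modal_split_analysis import ModalSplitAnalysis

analysis = ModalSplitAnalysis()

split = analysis.get_modal_split_for_hinterland(inbound=True, outbound=True)
total = split.train_capacity + split.barge_capacity + split.truck_capacity
if total > 0:
    print(f"train: {split.train_capacity / total:.1%}, "
          f"barge: {split.barge_capacity / total:.1%}, "
          f"truck: {split.truck_capacity / total:.1%}")

tsh = analysis.get_transshipment_and_hinterland_fraction()
print(tsh.transshipment_capacity, tsh.hinterland_capacity)
```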
#### File: conflowgen/previews/vehicle_capacity_exceeded_preview_report.py
```python
from __future__ import annotations
from typing import Dict
import pandas as pd
import seaborn as sns
from conflowgen.domain_models.data_types.mode_of_transport import ModeOfTransport
from conflowgen.previews.vehicle_capacity_exceeded_preview import VehicleCapacityExceededPreview
from conflowgen.reporting import AbstractReportWithMatplotlib
class VehicleCapacityExceededPreviewReport(AbstractReportWithMatplotlib):
"""
This preview report takes the data structure as generated by
:class:`.VehicleCapacityExceededPreview`
and creates a comprehensible representation for the user, either as text or as a graph.
A similar report can be found at
:class:`.InboundAndOutboundVehicleCapacityPreviewReport`.
The visual and table are expected to approximately look like in the
`example InboundAndOutboundVehicleCapacityPreviewReport \
<notebooks/previews.ipynb#Inbound-And-Outbound-Vehicle-Capacity-Preview-Report>`_.
"""
report_description = """
    This report previews the inbound and outbound traffic with a focus on the extent to which the vehicle capacities will
    be exceeded. This is only an estimate; additional restrictions (such as the dwell time restrictions) might further
reduce the number of containers one vehicle can in fact pick up for its outbound journey.
"""
def __init__(self):
super().__init__()
self.preview = VehicleCapacityExceededPreview(
start_date=self.start_date,
end_date=self.end_date,
transportation_buffer=self.transportation_buffer
)
def hypothesize_with_mode_of_transport_distribution(
self,
mode_of_transport_distribution: Dict[ModeOfTransport, Dict[ModeOfTransport, float]]
):
self.preview.hypothesize_with_mode_of_transport_distribution(mode_of_transport_distribution)
def get_report_as_text(
self
) -> str:
comparison = self._get_comparison()
# create string representation
report = "\n"
report += "vehicle type "
report += "maximum capacity (in TEU) "
report += "required capacity (in TEU) "
report += "exceeded "
report += "difference (in TEU)"
report += "\n"
for vehicle_type in self.order_of_vehicle_types_in_report:
vehicle_type_as_text = str(vehicle_type).replace("_", " ")
report += f"{vehicle_type_as_text:<17} "
(
container_capacity_to_pick_up,
maximum_capacity,
vehicle_type_capacity_is_exceeded
) = comparison[vehicle_type]
if not vehicle_type_capacity_is_exceeded:
difference = 0
else:
difference = container_capacity_to_pick_up - maximum_capacity
vehicle_type_capacity_is_exceeded_as_text = "yes" if vehicle_type_capacity_is_exceeded else "no"
report += f"{maximum_capacity:>24.1f} "
report += f"{container_capacity_to_pick_up:>25.1f} "
report += f"{vehicle_type_capacity_is_exceeded_as_text:>9}"
report += f"{difference:>20.1f}"
report += "\n"
report += "(rounding errors might exist)\n"
return report
def get_report_as_graph(self) -> object:
"""
Returns:
The matplotlib axis of the bar chart.
"""
comparison = self._get_comparison()
sns.set_palette(sns.color_palette())
df = pd.DataFrame.from_dict(comparison).T
df.columns = ["currently planned", "maximum", "exceeded"]
df.index = [str(i).replace("_", " ") for i in df.index]
df.rename({"truck": "truck (no max.)"}, axis=0, inplace=True)
ax = df.plot.barh()
ax.set_title("Capacity exceeded in preview?")
ax.set_xlabel("Capacity (in TEU)")
return ax
def _get_comparison(self):
assert self.start_date is not None
assert self.end_date is not None
assert self.transportation_buffer is not None
self.preview.update(
start_date=self.start_date,
end_date=self.end_date,
transportation_buffer=self.transportation_buffer
)
# gather data
comparison = self.preview.compare()
return comparison
``` |
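A minimal sketch of how the report class above is typically driven, assuming a ConFlowGen database has already been opened and populated so that `start_date`, `end_date`, and `transportation_buffer` are available through the base class:
```python
# Sketch only; assumes a populated ConFlowGen database is already open.
from conflowgen.previews.vehicle_capacity_exceeded_preview_report import (
    VehicleCapacityExceededPreviewReport,
)
report = VehicleCapacityExceededPreviewReport()
print(report.get_report_as_text())   # fixed-width text table assembled above
ax = report.get_report_as_graph()    # horizontal bar chart (matplotlib axis)
ax.get_figure().savefig("capacity_exceeded_preview.png")
```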
{
"source": "1kastner/CouchDBAuthenticator",
"score": 3
} |
#### File: CouchDBAuthenticator/couchdbauthenticator/user_manager.py
```python
import requests
class CouchDBConnection:
"""
See the following for more information:
- https://docs.couchdb.org/en/stable/api/database/common.html
- https://docs.couchdb.org/en/stable/intro/security.html
"""
def __init__(self, server_url, username, password, ssl_verification=True):
"""
The username and password are used to log into the CouchDB.
As the database 'users' is deleted and recreated, that user must be
an administrator of the CouchDB.
"""
self.server_url = f"https://{server_url}/"
self.username = username
self.auth = requests.auth.HTTPBasicAuth(username, password)
self.ssl_verification = ssl_verification
def reset_users_database(self):
"""
(Re-)Creates the database 'users'
"""
response_1 = requests.delete(self.server_url + "users", auth=self.auth, verify=self.ssl_verification)
assert response_1.status_code in (200, 202, 404), "Deletion of database 'users' failed"
response_2 = requests.put(self.server_url + "users", auth=self.auth, verify=self.ssl_verification)
assert response_2.status_code in (201, 202), "Creation of database 'users' failed"
def restrict_access_to_couchdb_user(self):
"""
Cited from the documentation at https://docs.couchdb.org/en/latest/api/database/security.html
> Having no admins, only server admins (with the reserved _admin role) are able to update design
> document and make other admin level changes.
>
> Having no members, any user can write regular documents (any non-design document) and read documents
> from the database.
"""
security_endpoint = self.server_url + "/users/_security"
security_document = {
"admins":{
"names": []
},
"members":{
"names": [self.username]
},
}
response_1 = requests.put(security_endpoint, json=security_document, auth=self.auth,
verify=self.ssl_verification)
        assert response_1.status_code == 200, "Security document could not be inserted"
response_2 = requests.get(self.server_url + "users", verify=self.ssl_verification)
assert response_2.status_code == 401, "Database still accessible from unauthenticated users"
def add_new_user(self, username, password):
"""
Adds new user document to the users database.
"""
user_doc = {
"username": username,
"password": password,
"active": True
}
docid = username # Assert uniqueness
response = requests.put(self.server_url + "users/" + docid, auth=self.auth, verify=self.ssl_verification,
json=user_doc)
assert response.status_code != 409, "User already exists in database."
assert response.status_code in (201, 202), f"User could not be created. Status code: {response.status_code}"
def _find_user(self, username):
"""
Find user in database.
"""
retrieve_url = self.server_url + "/users/" + username # username is docid
response = requests.get(retrieve_url, auth=self.auth, verify=self.ssl_verification)
user_doc = response.json()
return user_doc
def deactivate_user(self, username):
user_doc = self._find_user(username)
user_doc["active"] = False
response = requests.put(self.server_url + "users/" + user_doc["_id"],
auth=self.auth, verify=self.ssl_verification,
json=user_doc)
assert response.status_code in (201, 202), "User document could not be updated"
def reactivate_user(self, username):
user_doc = self._find_user(username)
user_doc["active"] = True
response = requests.put(self.server_url + "users/" + user_doc["_id"],
auth=self.auth, verify=self.ssl_verification,
json=user_doc)
assert response.status_code in (201, 202), "User document could not be updated"
``` |
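A short administrative sketch using the `CouchDBConnection` class above; the host name, credentials, and user are placeholders:
```python
# Placeholder host and credentials -- adapt before running against a real CouchDB.
from couchdbauthenticator.user_manager import CouchDBConnection
conn = CouchDBConnection("couchdb.example.com", "admin", "admin-password")
conn.reset_users_database()             # drop and recreate the 'users' database
conn.restrict_access_to_couchdb_user()  # members: only the admin user itself
conn.add_new_user("alice", "initial-password")
conn.deactivate_user("alice")           # sets "active": False on the user document
conn.reactivate_user("alice")
```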
{
"source": "1kastner/einfuehrung-in-datenauswertung",
"score": 3
} |
#### File: 1kastner/einfuehrung-in-datenauswertung/gtsrb_image_database_loader.py
```python
import os
import pandas as pd
def load_traffic_sign_database(path_to_directory):
"""
:param path_to_directory: Points to the root directory of the traffic sign folder structure (including the Readme.md)
:return: DataFrame containing the paths to all images
"""
traffic_sign_class_dfs = []
if not os.path.exists(path_to_directory):
raise Exception("The following path does not exist: '{p}'".format(p=path_to_directory))
for traffic_sign_class in log_progress(os.listdir(path_to_directory)):
if os.path.isfile(os.path.join(path_to_directory, traffic_sign_class)):
continue # e.g. the readme file
meta_info_file_name = "GT-{class_id}.csv".format(class_id=traffic_sign_class)
path_to_meta_info_file = os.path.join(path_to_directory, traffic_sign_class, meta_info_file_name)
if not os.path.exists(path_to_meta_info_file):
raise Exception("The following file does not exist: '{p}'".format(p=path_to_meta_info_file))
df = pd.read_csv(path_to_meta_info_file, delimiter=";")
df.columns = [col.replace(".", "_") for col in df.columns]
images = []
images = df.Filename.apply(lambda file_name : os.path.join(os.path.dirname(path_to_meta_info_file), file_name))
df = df.assign(path_to_image=images)
traffic_sign_class_dfs.append(df)
return pd.concat(traffic_sign_class_dfs)
def log_progress(sequence, every=None, size=None, name='Items'):
"""
taken from https://github.com/alexanderkuk/log-progress
"""
from ipywidgets import IntProgress, HTML, VBox
from IPython.display import display
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200) # every 0.5%
else:
assert every is not None, 'sequence is iterator, set every'
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = '{name}: {index} / ?'.format(
name=name,
index=index
)
else:
progress.value = index
label.value = u'{name}: {index} / {size}'.format(
name=name,
index=index,
size=size
)
yield record
except:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = index
label.value = "{name}: {index}".format(
name=name,
index=str(index or '?')
)
``` |
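A minimal sketch of how the loader above is used from a notebook (the progress bar relies on ipywidgets); the path is a placeholder pointing at a local copy of the GTSRB training images:
```python
# Notebook cell sketch; "data/GTSRB/Final_Training/Images" is a placeholder path.
from gtsrb_image_database_loader import load_traffic_sign_database
df = load_traffic_sign_database("data/GTSRB/Final_Training/Images")
print(df.shape)   # one row per image across all traffic sign classes
print(df.head())  # includes the added path_to_image column
```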
{
"source": "1kastner/hyperNN",
"score": 3
} |
#### File: hyperNN/report/plots.py
```python
import matplotlib.pyplot as plt
from sklearn.manifold import MDS
import numpy as np
def accuracy(acc):
max_acc = [max(acc[:i+1]) for i in range(len(acc))]
plt.figure(figsize=(16, 4), dpi=100)
plt.plot(acc, color="grey", linewidth=2.5, label="Accuracy")
plt.plot(max_acc, color="g", linewidth=2.5, label="Best accuracy")
plt.xlabel("Iterations")
plt.xlim(0, len(acc))
plt.legend(loc=4)
plt.show()
def mds_accuracy(X, acc):
X = MDS(n_components=2, random_state=42).fit_transform(X)
plt.figure(figsize=(16, 4), dpi=100)
cb = plt.scatter(X[:, 0], X[:, 1], c=acc,
cmap=plt.cm.get_cmap('jet'),
vmin=0.1, vmax=1, s=45)
plt.colorbar(cb)
plt.title("Accuracy in two components MDS view")
plt.show()
def summary(acc, quantile):
print("Best accuracy: {} at iteration {}".format(acc.max(), acc.argmax()))
print("Number of solutions better than {0:g}%: {1:.1f}%".format(
100 * quantile,
100 * np.sum(acc >= quantile) / float(acc.shape[0])
))
```
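A minimal sketch feeding the plotting helpers above with synthetic data, assuming the script is run from the repository root so that `report` is importable; in the original workflow `acc` and `X` come from the JSON results parsed by `report/processing.py`:
```python
# Synthetic stand-in data; real values come from report/processing.py.
import numpy as np
from report.plots import accuracy, mds_accuracy, summary
acc = np.clip(0.5 + np.cumsum(np.random.rand(50) * 0.01), 0, 1)  # fake accuracy trace
X = np.random.rand(50, 8)                                        # fake hyperparameter vectors
accuracy(acc)         # accuracy and running best per iteration
mds_accuracy(X, acc)  # 2-D MDS embedding coloured by accuracy
summary(acc, 0.9)     # share of configurations reaching at least 90% accuracy
```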
#### File: hyperNN/report/processing.py
```python
import json
import numpy as np
def accuracy(filename):
with open(filename, "r") as f:
data = json.load(f)
accuracy = np.array(list(map(lambda j: j["Accuracy"], data)))
return accuracy
def params(filename):
with open(filename, "r") as f:
data = json.load(f)
return data
``` |
{
"source": "1kastner/illumidesk",
"score": 2
} |
#### File: illumidesk/authenticators/utils.py
```python
import random
import re
import secrets
import time
import uuid
from jupyterhub.handlers import BaseHandler
from tornado.web import HTTPError
from tornado.web import RequestHandler
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from traitlets.config import LoggingConfigurable
# Determined from https://www.imsglobal.org/specs/ltiv1p1p1/implementation-guide
# This page also provides a nice summary of the required, recommended, and optional
# LTI 1.1 launch parameters: https://www.edu-apps.org/code.html. We define the user_id
# as required even though it is defined as recommended since we need this value to track
# the lms_user_id used in the grader db.
LTI11_LAUNCH_PARAMS_REQUIRED = [
'lti_message_type',
'lti_version',
'resource_link_id',
'user_id',
]
LTI11_LAUNCH_PARAMS_RECOMMENDED = [
'resource_link_title',
'roles',
'lis_person_name_given',
'lis_person_name_family',
'lis_person_name_full',
'lis_person_contact_email_primary',
'context_id',
'context_title',
'context_label',
'launch_presentation_locale',
'launch_presentation_document_target',
'launch_presentation_width',
'launch_presentation_height',
'launch_presentation_return_url',
'tool_consumer_info_product_family_code',
'tool_consumer_info_version',
'tool_consumer_instance_guid',
'tool_consumer_instance_name',
'tool_consumer_instance_contact_email',
]
LTI11_LAUNCH_PARAMS_OTIONAL = [
'resource_link_description',
'user_image',
'role_scope_mentor',
'context_type',
'launch_presentation_css_url',
'tool_consumer_instance_description',
'tool_consumer_instance_url',
]
LTI11_LIS_OPTION = [
'lis_outcome_service_url',
'lis_result_sourcedid',
'lis_person_sourcedid',
'lis_course_offering_sourcedid',
'lis_course_section_sourcedid',
]
# https://www.imsglobal.org/specs/ltiv1p1/implementation-guide
# Section 4.2
LTI11_OAUTH_ARGS = [
'oauth_consumer_key',
'oauth_signature_method',
'oauth_timestamp',
'oauth_nonce',
'oauth_callback',
'oauth_version',
'oauth_signature',
]
LAUNCH_PARAMS_REQUIRED = LTI11_LAUNCH_PARAMS_REQUIRED + LTI11_OAUTH_ARGS
LAUNCH_PARAMS_ALL = (
LTI11_LAUNCH_PARAMS_REQUIRED
+ LTI11_LAUNCH_PARAMS_RECOMMENDED
+ LTI11_LAUNCH_PARAMS_OTIONAL
)
class LTIUtils(LoggingConfigurable):
"""
A class which contains various utility functions
which work in conjunction with LTI requests.
"""
def normalize_name_for_containers(self, name: str) -> str:
"""
Function used to strip special characters and convert strings
to docker container compatible names. This function is used mostly
with course labels, as they are used for the shared grader notebook
container names.
Args:
name: The string to normalize for docker container and volume
names (e.g. Dev-IllumiDesk)
Returns:
normalized_name: The normalized string
"""
if not name:
raise ValueError('Name is empty')
# truncate name after 30th character
name = (name[:30] + '') if len(name) > 30 else name
# remove special characters
name = re.sub(r'[^\w-]+', '', name)
# if the first character is any of _.- remove it
name = name.lstrip('_.-')
# convert to lower case
name = name.lower()
# limit course_id to 22 characters, since its used for o/s username
# in jupyter/docker-stacks compatible grader notebook (NB_USER)
normalized_name = name[0:20]
self.log.debug('String normalized to %s' % normalized_name)
return normalized_name
def email_to_username(self, email: str) -> str:
"""
Normalizes an email to get a username. This function
calculates the username by getting the string before the
@ symbol, removing special characters, removing comments,
converting string to lowercase, and adds 1 if the username
has an integer value already in the string.
Args:
email: A valid email address
Returns:
username: A username string
Raises:
ValueError if email is empty
"""
if not email:
raise ValueError('email is missing')
username = email.split('@')[0]
username = username.split('+')[0]
username = re.sub(r'\([^)]*\)', '', username)
username = re.sub(r'[^\w-]+', '', username)
username = username.lower()
self.log.debug('Username normalized to %s' % username)
return username
    def get_client_protocol(self, handler: RequestHandler) -> str:
"""
This is a copy of the jupyterhub-ltiauthenticator logic to get the first
protocol value from the x-forwarded-proto list, assuming there is more than
one value. Otherwise, this returns the value as-is.
Extracted as a method to facilitate testing.
Args:
handler: a tornado.web.RequestHandler object
Returns:
            The protocol of the original request, e.g. 'https' (the first x-forwarded-proto entry if present)
"""
if 'x-forwarded-proto' in handler.request.headers:
hops = [
h.strip()
for h in handler.request.headers['x-forwarded-proto'].split(',')
]
protocol = hops[0]
else:
protocol = handler.request.protocol
return protocol
def convert_request_to_dict(
self, arguments: Dict[str, List[bytes]]
) -> Dict[str, str]:
"""
Converts the arguments obtained from a request to a dict.
Args:
handler: a tornado.web.RequestHandler object
Returns:
A decoded dict with keys/values extracted from the request's arguments
"""
args = {}
for k, values in arguments.items():
args[k] = (
values[0].decode() if len(values) == 1 else [v.decode() for v in values]
)
return args
```
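A quick illustration of the normalization helpers above (input values are made up; the expected outputs follow from the regex and truncation rules in the code):
```python
from illumidesk.authenticators.utils import LTIUtils
utils = LTIUtils()
print(utils.normalize_name_for_containers("Dev-IllumiDesk: Course 101!"))
# -> "dev-illumideskcourse" (special characters stripped, lower-cased, truncated to 20 chars)
print(utils.email_to_username("Jane.Doe+notes@example.com"))
# -> "janedoe"
```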
#### File: illumidesk/spawners/spawner.py
```python
import os
import shutil
from dockerspawner import DockerSpawner
class IllumiDeskDockerSpawner(DockerSpawner):
"""
Custom DockerSpawner which assigns a user notebook image
based on the user's role. This spawner requires:
1. That the `Authenticator.enable_auth_state = True`
2. That the user's `USER_ROLE` environment variable is set
"""
def _image_from_role(self, user_role: str) -> str:
"""
Given a user role, return the right image
Args:
user_role: the user's role
Returns:
docker_image: docker image used to spawn container based on role
"""
if not user_role:
raise ValueError('user_role is missing')
# default to standard image, otherwise assign image based on role
self.log.debug('User role used to set image: %s' % user_role)
docker_image = str(os.environ.get('DOCKER_STANDARD_IMAGE'))
if user_role == 'Learner' or user_role == 'Student':
docker_image = str(os.environ.get('DOCKER_LEARNER_IMAGE'))
elif user_role == 'Instructor':
docker_image = str(os.environ.get('DOCKER_INSTRUCTOR_IMAGE'))
elif user_role == 'Grader':
docker_image = str(os.environ.get('DOCKER_GRADER_IMAGE'))
self.log.debug('Image based on user role set to %s' % docker_image)
return docker_image
async def auth_state_hook(self, spawner, auth_state):
"""
Customized hook to assign USER_ROLE environment variable to LTI user role.
The USER_ROLE environment variable is used to select the notebook image based
on the user's role.
"""
if not auth_state:
self.log.debug('auth_state not enabled.')
return
self.log.debug('auth_state_hook set with %s role' % auth_state['user_role'])
self.environment['USER_ROLE'] = auth_state['user_role']
self.log.debug(
'Assigned USER_ROLE env var to %s' % self.environment['USER_ROLE']
)
# Create a new user directory if it does not exist on the host, regardless
# of whether or not its mounted with NFS.
def pre_spawn_hook(self, spawner):
"""
Creates the user directory based on information passed from the
`spawner` object.
Args:
spawner: JupyterHub spawner object
"""
if not self.user.name:
raise ValueError('Spawner object does not contain the username')
username = self.user.name
user_path = os.path.join('/home', username)
if not os.path.exists(user_path):
os.mkdir(user_path)
shutil.chown(
user_path,
user=int(os.environ.get('MNT_HOME_DIR_UID')),
group=int(os.environ.get('MNT_HOME_DIR_GID')),
)
os.chmod(user_path, 0o755)
def start(self):
user_role = self.user.spawner.environment.get('USER_ROLE') or 'Learner'
self.log.debug('User %s has role: %s' % (self.user.name, user_role))
self.image = self._image_from_role(str(user_role))
self.log.debug('Starting with image: %s' % self.image)
return super().start()
```
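A hypothetical `jupyterhub_config.py` excerpt wiring up the spawner above; the image names are placeholders for the environment variables read by `_image_from_role()`, and `c` is the configuration object JupyterHub injects into the config file:
```python
# jupyterhub_config.py (excerpt) -- image names are placeholders.
import os
os.environ.setdefault('DOCKER_STANDARD_IMAGE', 'illumidesk/standard-notebook:latest')
os.environ.setdefault('DOCKER_LEARNER_IMAGE', 'illumidesk/learner-notebook:latest')
os.environ.setdefault('DOCKER_INSTRUCTOR_IMAGE', 'illumidesk/instructor-notebook:latest')
os.environ.setdefault('DOCKER_GRADER_IMAGE', 'illumidesk/grader-notebook:latest')
c.JupyterHub.spawner_class = 'illumidesk.spawners.spawner.IllumiDeskDockerSpawner'
c.Authenticator.enable_auth_state = True  # required so auth_state_hook receives the LTI role
```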
#### File: illumidesk/authenticators/test_lti11_authenticator.py
```python
import os
import pytest
import unittest
import json
from tornado.web import RequestHandler
from tornado.web import HTTPError
from tornado.httputil import HTTPServerRequest
from typing import Dict
from unittest.mock import Mock
from unittest.mock import MagicMock
from unittest.mock import patch
from illumidesk.authenticators.validator import LTI11LaunchValidator
from illumidesk.authenticators.authenticator import LTI11Authenticator
from illumidesk.authenticators.utils import LTIUtils
def mock_lti11_args(lms_vendor: str) -> Dict[str, str]:
args = {
'oauth_consumer_key': ['my_consumer_key'.encode()],
'oauth_signature_method': ['HMAC-SHA1'.encode()],
'oauth_timestamp': ['1585947271'.encode()],
'oauth_nonce': ['01fy8HKIASKuD9gK9vWUcBj9fql1nOCWfOLPzeylsmg'.encode()],
'oauth_version': ['1.0'.encode()],
'context_id': ['888efe72d4bbbdf90619353bb8ab5965ccbe9b3f'.encode()],
'context_label': ['intro101'.encode()],
'context_title': ['intro101'.encode()],
'ext_roles': ['urn:lti:instrole:ims/lis/Learner'.encode()],
'launch_presentation_document_target': ['iframe'.encode()],
'launch_presentation_height': ['1000'.encode()],
'launch_presentation_locale': ['en'.encode()],
'launch_presentation_return_url': [
'https: //illumidesk.instructure.com/courses/161/external_content/success/external_tool_redirect'.encode()
],
'launch_presentation_width': ['1000'.encode()],
'lis_person_contact_email_primary': ['<EMAIL>'.encode()],
'lis_person_name_family': ['Bar'.encode()],
'lis_person_name_full': ['<NAME>'.encode()],
'lis_person_name_given': ['Foo'.encode()],
'lti_message_type': ['basic-lti-launch-request'.encode()],
'lti_version': ['LTI-1p0'.encode()],
'oauth_callback': ['about:blank'.encode()],
'resource_link_id': ['888efe72d4bbbdf90619353bb8ab5965ccbe9b3f'.encode()],
'resource_link_title': ['IllumiDesk'.encode()],
'roles': ['Learner,urn:lti:instrole:ims/lis/Learner'.encode()],
'tool_consumer_info_product_family_code': [lms_vendor.encode()],
'tool_consumer_info_version': ['cloud'.encode()],
'tool_consumer_instance_contact_email': ['<EMAIL>'.encode()],
'tool_consumer_instance_guid': [
'srnuz6h1U8kOMmETzoqZTJiPWzbPXIYkAUnnAJ4u:test-lms'.encode()
],
'tool_consumer_instance_name': ['myorg'.encode()],
'user_id': ['185d6c59731a553009ca9b59ca3a885100000'.encode()],
'user_image': ['https://lms.example.com/avatar-50.png'.encode()],
'oauth_signature': ['abc123'.encode()],
}
return args
def _side_effect_canvas_lms(lms_vendor: str) -> str:
arguments = mock_lti11_args(lms_vendor)
arguments['custom_canvas_user_login_id'] = ['mycanvasuser'.encode()]
return arguments
def _side_effect_other_lms(lms_vendor: str) -> str:
arguments = mock_lti11_args(lms_vendor)
return arguments
@pytest.mark.asyncio
@patch('illumidesk.authenticators.authenticator.LTI11LaunchValidator')
async def test_authenticator_returns_auth_state_with_canvas_fields(lti11_authenticator):
'''
Do we get a valid username when sending an argument with the custom canvas id?
'''
with patch.object(
LTI11LaunchValidator, 'validate_launch_request', return_value=True
):
authenticator = LTI11Authenticator()
handler = Mock(
spec=RequestHandler,
get_secure_cookie=Mock(return_value=json.dumps(['key', 'secret'])),
request=Mock(arguments=mock_lti11_args('canvas'), headers={}, items=[],),
)
result = await authenticator.authenticate(handler, None)
expected = {
'name': 'foo',
'auth_state': {
'course_id': 'intro101',
'lms_user_id': '185d6c59731a553009ca9b59ca3a885100000',
'user_role': 'Learner',
},
}
assert result == expected
@pytest.mark.asyncio
@patch('illumidesk.authenticators.authenticator.LTI11LaunchValidator')
async def test_authenticator_returns_auth_state_with_other_lms_vendor(
lti11_authenticator,
):
'''
Do we get a valid username with lms vendors other than canvas?
'''
utils = LTIUtils()
utils.convert_request_to_dict = MagicMock(name='convert_request_to_dict')
utils.convert_request_to_dict(3, 4, 5, key='value')
with patch.object(
LTI11LaunchValidator, 'validate_launch_request', return_value=True
):
authenticator = LTI11Authenticator()
handler = Mock(
spec=RequestHandler,
get_secure_cookie=Mock(return_value=json.dumps(['key', 'secret'])),
request=Mock(arguments=mock_lti11_args('moodle'), headers={}, items=[],),
)
result = await authenticator.authenticate(handler, None)
expected = {
'name': 'foo',
'auth_state': {
'course_id': 'intro101',
'lms_user_id': '185d6c59731a553009ca9b59ca3a885100000',
'user_role': 'Learner',
},
}
assert result == expected
@pytest.mark.asyncio
async def test_authenticator_uses_ltivalidator():
with patch.object(
LTI11LaunchValidator, 'validate_launch_request', return_value=True
) as mock_validator:
authenticator = LTI11Authenticator()
handler = Mock(spec=RequestHandler)
request = HTTPServerRequest(method='POST', connection=Mock(),)
handler.request = request
handler.request.arguments = mock_lti11_args('lmsvendor')
handler.request.get_argument = lambda x, strip=True: mock_lti11_args(
'lmsvendor'
)[x][0].decode()
_ = await authenticator.authenticate(handler, None)
assert mock_validator.called
@pytest.mark.asyncio
async def test_authenticator_invokes_validator_with_decoded_dict():
with patch.object(
LTI11LaunchValidator, 'validate_launch_request', return_value=True
) as mock_validator:
authenticator = LTI11Authenticator()
handler = Mock(spec=RequestHandler)
request = HTTPServerRequest(method='POST', uri='/hub', host='example.com')
handler.request = request
handler.request.protocol = 'https'
handler.request.arguments = mock_lti11_args('canvas')
handler.request.get_argument = lambda x, strip=True: mock_lti11_args('canvas')[
x
][0].decode()
_ = await authenticator.authenticate(handler, None)
# check our validator was called
assert mock_validator.called
decoded_args = {
k: handler.request.get_argument(k, strip=False)
for k, v in mock_lti11_args('canvas').items()
}
# check validator was called with correct dict params (decoded)
mock_validator.assert_called_with('https://example.com/hub', {}, decoded_args)
``` |
{
"source": "1kastner/jupyterhub-hashauthenticator",
"score": 3
} |
#### File: jupyterhub-hashauthenticator/hashauthenticator/hashauthenticator.py
```python
from jupyterhub.auth import Authenticator
from tornado import gen
from traitlets import Unicode, Integer
import hashlib, binascii
def generate_password_digest(username, secret_key):
dk = hashlib.pbkdf2_hmac('sha256', username.encode(), secret_key.encode(), 25000)
password_digest = binascii.hexlify(dk).decode()
return password_digest
class HashAuthenticator(Authenticator):
secret_key = Unicode(
config=True,
help="Key used to encrypt usernames to produce passwords."
)
password_length = Integer(
default_value=6,
config=True,
help="Password length.")
@gen.coroutine
def authenticate(self, handler, data):
username = data['username']
password = data['password']
password_digest = generate_password_digest(username, self.secret_key)
expected_password = password_digest[:self.password_length]
    if password == expected_password:
return username
return None
``` |
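In `jupyterhub_config.py` this authenticator is selected with `c.JupyterHub.authenticator_class = 'hashauthenticator.HashAuthenticator'` together with `c.HashAuthenticator.secret_key` and `c.HashAuthenticator.password_length`. A small sketch of how an operator can derive the password a user is expected to enter (the secret is a placeholder):
```python
# Derives the same value the authenticate() method compares against.
from hashauthenticator.hashauthenticator import generate_password_digest
secret_key = "change-me"   # placeholder; must match c.HashAuthenticator.secret_key
password_length = 8        # must match c.HashAuthenticator.password_length
print(generate_password_digest("alice", secret_key)[:password_length])
```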
{
"source": "1kastner/SSHSpawner",
"score": 2
} |
#### File: SSHSpawner/sshspawner/sshspawner.py
```python
import os
import pipes
import pwd
import re
import random
import stat
import pexpect
import shutil
import signal
from glob import glob
from urllib.parse import urlparse, urlunparse
from pexpect import popen_spawn
from tempfile import TemporaryDirectory
from jupyterhub.spawner import LocalProcessSpawner
from traitlets import default
from traitlets import (
Bool, Integer, Unicode, Int, List
)
from jupyterhub.utils import (
random_port, can_connect, wait_for_http_server, make_ssl_context
)
_script_template = """#!/bin/bash
# entrypoint for shared kernel link?
# start the notebook with appropriate args
{}
"""
class HostNotFound(Exception):
def __init__(self, host):
super().__init__(self,
"Unable to locate host {host}.".format(host=host))
class ConnectionError(Exception):
def __init__(self, host):
super().__init__(self,
"Unable to connect to host {host}".format(host=host))
class SSHSpawner(LocalProcessSpawner):
ssh_target = ""
resource_path = Unicode(
".jupyter/jupyterhub/resources",
help="""The base path where all necessary resources are placed.
Generally left relative so that resources are placed into this base
directory in the users home directory.
"""
).tag(config=True)
hostname = Unicode(
"",
help="Hostname of the hub host. Useful if the Hub is in a container."
).tag(config=True)
known_hosts = Unicode(
"/opt/jupyter/known_hosts",
help="Premade known_hosts file to enable trusted, seamless ssh."
).tag(config=True)
ssh_hosts = List(
[],
help="List of available hosts to ssh to."
).tag(config=True)
allow_origin_pattern = Unicode(
"",
help="Pattern for CORS requests (when behind a reverse proxy)"
).tag(config=True)
local_logfile = Unicode(
"",
help="""Name of the file to redirect stdout and stderr from the remote
notebook."""
).tag(config=True)
ssh_control_persist_time = Int(
1,
help="""The amount of time for SSH connections over the control master
will stay active"""
).tag(config=True)
cleanup_server = Bool(
True,
help="Teardown the notebook server when contact is lost with the hub."
).tag(config=True)
hub_check_interval = Integer(
5,
help="Interval in minutes to check if notebook has been orphaned."
).tag(config=True)
notebook_max_lifetime = Integer(
12,
help="Max lifetime in hours for a remotely spawned notebook to live."
).tag(config=True)
idle_timeout = Integer(
300,
help="""The amount of time before culling an idle kernel."""
).tag(config=True)
start_notebook_cmd = Unicode(
"start-notebook",
help="""The command to run to start a notebook"""
).tag(config=True)
stop_notebook_cmd = Unicode(
"stop-notebook",
help="""The command to run to stop a running notebook"""
).tag(config=True)
@property
def ssh_socket(self):
return "/tmp/{user}@{host}".format(
user=self.user.name,
host=self.ssh_target
)
def get_user_ssh_hosts(self):
return self.ssh_hosts
@default('options_form')
def _options_form(self):
"""Populate a list of ssh targets on the pre_spawn form"""
hosts = self.get_user_ssh_hosts()
if not hosts:
return """
<label for="host">Input host for notebook launch:</label>
<input type="text" name="host" class="form-control">
"""
host_option_template = '<option value="{host}">{host}</option>'
host_option_tags = []
for host in hosts:
host_option_tags.append(
host_option_template.format(host=host))
options = ''.join(host_option_tags)
return """
<label for="host">Select host for notebook launch:</label>
<select name="host" class="form-control">{options}</select>
""".format(options=options)
def options_from_form(self, formdata):
"""Turn html formdata from `options_form` into a dict for later use"""
options = {}
options['host'] = pipes.quote(formdata.get('host', [''])[0].strip())
return options
def ips_for_host(self, host):
"""Return all the ips reported by the host command"""
ip_pattern = r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}"
child = pexpect.spawn("host {}".format(host), encoding="utf-8")
i = child.expect([r"Host \w+ not found", ".*has address.*"])
if i == 0:
raise HostNotFound(host)
else:
lines = child.after.split('\n')
# Look for ip addresses and build a list of the ones found
lines = [match.group() for match
in [re.search(ip_pattern, line) for line in lines]
if match]
if len(lines) == 0:
raise HostNotFound(host)
return lines
def ssh_opts(self, persist=180,
known_hosts="", batch_mode=True, other_opts=None):
"""Default set of options to attach to ssh commands
The minimum arguments are a good, known_hosts file and enabling
batch mode. The known_hosts file avoids user's known_hosts files
which may not trust other hosts. Batch mode will cause ssh to fail
on prompting for a password.
This implementation also uses ssh ControlMaster to speed up and
simplify repeated operations over SSH.
"""
opts = {
"ControlMaster": "auto",
"ControlPath": "/tmp/%r@%h",
"ControlPersist": persist,
"BatchMode": batch_mode,
}
if known_hosts:
opts["UserKnownHostsFile"] = known_hosts
else:
self.log.warning("Skipping host key check")
opts["StrictHostKeyChecking"] = "no"
if other_opts:
            opts.update(other_opts)
tmpl = "-o {opt}={val}"
return ' '.join(
[tmpl.format(opt=opt, val=val) for opt, val in opts.items()])
def spawn_as_user(self, cmd, timeout=10):
"""Run pexpect as the user spawning the notebook
        This method attaches Kerberos credentials to the command env if they
exist.
"""
user = pwd.getpwnam(self.user.name)
uid = user.pw_uid
env = os.environ
krb_files = glob("/tmp/krb5cc_{uid}*".format(uid=uid))
if krb_files:
env["KRB5CCNAME"] = max(krb_files, key=os.path.getctime)
popen_kwargs = dict(
env=env,
timeout=timeout,
encoding="utf-8",
preexec_fn=self.make_preexec_fn(self.user.name)
)
self.log.debug("Running: {cmd} as {user}".format(
cmd=cmd,
user=self.user.name))
return popen_spawn.PopenSpawn(cmd, **popen_kwargs)
async def remote_env(self, host=None):
"""Command with the `get_env` environment as the input to `/bin/env`
Used to pass the necessary environment to the `jupyterhub-singleuser`
command and isolate/hide the environment variables via `/bin/env`.
"""
def env_str_to_dict(output):
"Convert the output of `env` into a dict"
d = {}
lines = output.split('\n')
for line in lines:
divided = line.split('=')
if len(divided) == 2:
var, val = divided
d[var] = val
elif len(divided) == 1:
var = divided[0]
d[var] = ''
return d
if host:
opts = self.ssh_opts(
known_hosts=self.known_hosts
)
self.log.info("Collecting remote environment from {}".format(host))
child = self.spawn_as_user(
"ssh {opts} {host} env".format(opts=opts, host=host)
)
child.expect(pexpect.EOF)
return env_str_to_dict(child.before)
def ip_for_host(self, host):
"""Return an ip for a given host
This method is meant to pick from a series of ips that come back from
invoking the host command. This could be used to implement load
balancing.
"""
ips = self.ips_for_host(host)
random.shuffle(ips)
for ip in ips:
if can_connect(ip, 22):
return ip
raise ConnectionError(host)
def get_env(self, other_env=None):
"""Get environment variables to be set in the spawned process."""
def swap_host(url, hostname=""):
if not hostname:
return url
parsed = urlparse(url)
parsed = parsed._replace(netloc=hostname + ":" + str(parsed.port))
return urlunparse(parsed)
env = super().get_env()
if other_env:
env.update(other_env)
unwanted_keys = set(["VIRTUAL_ENV", "SSH_ASKPASS"])
for key in unwanted_keys:
if key in env:
del env[key]
env['JUPYTERHUB_CLEANUP_SERVERS'] = self.cleanup_server
env['JUPYTERHUB_CHECK_INTERVAL'] = self.hub_check_interval * 60
env['JUPYTERHUB_MAX_LIFETIME'] = self.notebook_max_lifetime * 60 * 60
# This is to account for running JupyterHub in a container since the
# container hostname will be meaningless.
env['JUPYTERHUB_API_URL'] = swap_host(
env['JUPYTERHUB_API_URL'],
hostname=self.hostname
)
env['JUPYTERHUB_ACTIVITY_URL'] = swap_host(
env['JUPYTERHUB_ACTIVITY_URL'],
hostname=self.hostname
)
# If the user starting their notebook is in the list of admins
if self.user.name in self.user.settings.get('admin_users', []):
env['JUPYTERHUB_ADMIN_ACCESS'] = 1
else:
env['JUPYTERHUB_ADMIN_ACCESS'] = 0
return env
def get_args(self):
"""Get the args to send to the jupyterhub-singleuser command
Extends the default `get_args` command and adds arguments for security
and specifically to make the SSHSpawner work.
"""
args = super().get_args()
if self.allow_origin_pattern:
args.append(
'--SingleUserNotebookApp.allow_origin_pat={patt}'
.format(patt=self.allow_origin_pattern)
)
args.append(
'--MappingKernelManager.cull_idle_timeout={timeout}'
.format(timeout=self.idle_timeout)
)
args.append('--KernelManager.transport=ipc')
if self.local_logfile:
args.append('2>&1 | tee -a {base}/{logfile}'.format(
base=self.resource_path, logfile=self.local_logfile))
return args
def stage_certs(self, paths, dest):
shutil.move(paths['keyfile'], dest)
shutil.move(paths['certfile'], dest)
shutil.copy(paths['cafile'], dest)
key_base_name = os.path.basename(paths['keyfile'])
cert_base_name = os.path.basename(paths['certfile'])
ca_base_name = os.path.basename(paths['cafile'])
key = os.path.join(self.resource_path, key_base_name)
cert = os.path.join(self.resource_path, cert_base_name)
ca = os.path.join(self.resource_path, ca_base_name)
return {
"keyfile": key,
"certfile": cert,
"cafile": ca,
}
async def create_start_script(self, start_script, remote_env=None):
user = pwd.getpwnam(self.user.name)
uid = user.pw_uid
gid = user.pw_gid
env = self.get_env(other_env=remote_env)
quoted_env = ["env"] +\
[pipes.quote("{var}={val}".format(var=var, val=val))
for var, val in env.items()]
# environment + cmd + args
cmd = quoted_env + self.cmd + self.get_args()
with open(start_script, "w") as fh:
fh.write(
_script_template.format(' '.join(cmd))
)
shutil.chown(start_script, user=uid, group=gid)
os.chmod(
start_script,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
)
async def start(self):
with TemporaryDirectory() as td:
local_resource_path = td
start_script = os.path.join(
local_resource_path,
self.start_notebook_cmd
)
user = pwd.getpwnam(self.user.name)
uid = user.pw_uid
gid = user.pw_gid
self.port = random_port()
host = pipes.quote(self.user_options['host'])
self.ssh_target = self.ip_for_host(host)
remote_env = await self.remote_env(host=self.ssh_target)
opts = self.ssh_opts(
persist=self.ssh_control_persist_time,
known_hosts=self.known_hosts
)
self.cert_paths = self.stage_certs(
self.cert_paths,
local_resource_path
)
# Create the start script (part of resources)
await self.create_start_script(start_script, remote_env=remote_env)
# Set proper ownership to the user we'll run as
for f in [local_resource_path] + \
[os.path.join(local_resource_path, f)
for f in os.listdir(local_resource_path)]:
shutil.chown(f, user=uid, group=gid)
# Create remote directory in user's home
create_dir_proc = self.spawn_as_user(
"ssh {opts} {host} mkdir -p {path}".format(
opts=opts,
host=self.ssh_target,
path=self.resource_path
)
)
create_dir_proc.expect(pexpect.EOF)
copy_files_proc = self.spawn_as_user(
"scp {opts} {files} {host}:{target_dir}/".format(
opts=opts,
files=' '.join([os.path.join(local_resource_path, f)
for f in os.listdir(local_resource_path)]),
cp_dir=local_resource_path,
host=self.ssh_target,
target_dir=self.resource_path
)
)
i = copy_files_proc.expect([
".*No such file or directory",
"ssh: Could not resolve hostname",
pexpect.EOF,
])
if i == 0:
raise IOError("No such file or directory: {}".format(
local_resource_path))
elif i == 1:
raise HostNotFound(
"Could not resolve hostname {}".format(self.ssh_target)
)
elif i == 2:
self.log.info("Copied resources for {user} to {host}".format(
user=self.user.name,
host=self.ssh_target
))
# Start remote notebook
start_notebook_child = self.spawn_as_user(
"ssh {opts} -L {port}:{ip}:{port} {host} {cmd}".format(
ip="127.0.0.1",
port=self.port,
opts=opts,
host=self.ssh_target,
cmd=os.path.join(self.resource_path,
self.start_notebook_cmd)
),
timeout=None
)
self.proc = start_notebook_child.proc
self.pid = self.proc.pid
if self.ip:
self.user.server.ip = self.ip
self.user.server.port = self.port
return (self.ip or '127.0.0.1', self.port)
async def stop(self, now=False):
"""Stop the remote single-user server process for the current user.
For the SSHSpawner, this means first attempting to stop the remote
notebook and then killing the tunnel process (which should die once
the notebook does).
The `jupyterhub-singleuser` command has been modified to periodically
poll the hub for contact and authorization. Failing these, it should
think itself orphaned and shut itself down.
"""
status = await self.poll()
if status is not None:
return
self.log.info("Stopping user {user}'s notebook at port {port} on host "
"{host}".format(user=self.user.name, port=self.port,
host=self.ssh_target))
stop_child = self.spawn_as_user("ssh {opts} {host} {cmd}".format(
opts=self.ssh_opts(known_hosts=self.known_hosts),
host=self.ssh_target,
cmd=self.stop_notebook_cmd
)
)
stop_child.expect(pexpect.EOF)
ret_code = stop_child.wait()
if ret_code == 0:
self.log.info("Notebook stopped")
self.log.debug("Killing %i", self.pid)
await self._signal(signal.SIGKILL)
# close the tunnel
os.remove(self.ssh_socket)
async def poll(self):
"""Poll the spawned process to see if it is still running and reachable
If the process is still running, and we can connect to the remote
singleuser server over the tunnel, we return None. If it is not
running, or unreachable we return the exit code of the process if we
have access to it, or 0 otherwise.
"""
status = await super().poll()
if status is not None:
return status
elif not os.path.exists(self.ssh_socket):
# tunnel is closed or non-existent
return 0
else:
protocol = "http" if not self.user.settings["internal_ssl"] \
else "https"
url = "{protocol}://{ip}:{port}".format(
protocol=protocol,
ip=(self.ip or '127.0.0.1'),
port=self.port
)
key = self.user.settings.get('internal_ssl_key')
cert = self.user.settings.get('internal_ssl_cert')
ca = self.user.settings.get('internal_ssl_ca')
ctx = make_ssl_context(key, cert, cafile=ca)
try:
reachable = await wait_for_http_server(url, ssl_context=ctx)
except Exception as e:
if isinstance(e, TimeoutError):
e.reason = 'timeout'
self.log.warning(
"Unable to reach {user}'s server for 10 seconds. "
"Giving up: {err}".format(
user=self.user.name,
err=e
),
)
return 1
else:
e.reason = 'error'
self.log.warning(
"Error reaching {user}'s server: {err}".format(
user=self.user.name,
err=e
)
)
return 2
else:
return None if reachable else 0
``` |
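A hypothetical `jupyterhub_config.py` excerpt for the spawner above; host names and paths are placeholders, and `c` is the configuration object JupyterHub provides:
```python
# jupyterhub_config.py (excerpt) -- hosts and hostname are placeholders.
c.JupyterHub.spawner_class = 'sshspawner.sshspawner.SSHSpawner'
c.SSHSpawner.ssh_hosts = ['compute01.example.org', 'compute02.example.org']
c.SSHSpawner.known_hosts = '/opt/jupyter/known_hosts'
c.SSHSpawner.hostname = 'hub.example.org'    # rewrites JUPYTERHUB_API_URL for containerized hubs
c.SSHSpawner.local_logfile = 'notebook.log'  # appended under resource_path on the remote host
```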
{
"source": "1kc2/Minimal-Correlation-Portfolio",
"score": 3
} |
#### File: 1kc2/Minimal-Correlation-Portfolio/build_correlation_network.py
```python
import pandas as pd
import numpy as np
import preprocess as pre
import detrend as dtr
import networkx as nx
import distance_correlation as dc
def build_network():
df = dc.distance_correlation()
# converts the dataframe to a matrix (need this to generate the graph from the networkx package)
cor_matrix = df.values.astype("float")
sim_matrix = 1 - cor_matrix
# transforms the similarity matrix into a weighted graph
G = nx.from_numpy_matrix(sim_matrix)
# extracts the indices (i.e., the stock names) from the correlation dataframe
stocks = df.index.values
# relabel nodes as the stock names
G = nx.relabel_nodes(G, lambda x: stocks[x])
# prints the edges with their corresponding weights
G.edges(data=True)
# copy correlation network
H = G.copy()
# removes edges from H with data from G
for (u, v, wt) in G.edges.data("weight"):
# removes edges with absolute correlations less than or equal to 0.35
if wt >= 1 - 0.35:
H.remove_edge(u, v)
# remove self-edges from H using data from G.
# this is needed for graph-theoretic analyses
if u == v:
H.remove_edge(u, v)
return H
if __name__ == "__main__":
build_network()
```
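A quick inspection of the pruned correlation network built above, assuming the upstream modules (`preprocess`, `detrend`, `distance_correlation`) and the underlying price data are available locally:
```python
import networkx as nx
import build_correlation_network as bnx
H = bnx.build_network()
print(H.number_of_nodes(), "stocks,", H.number_of_edges(),
      "edges kept (distance correlation > 0.35)")
print("isolated stocks:", list(nx.isolates(H)))  # weakly correlated with everything else
```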
#### File: 1kc2/Minimal-Correlation-Portfolio/detrend.py
```python
import pandas as pd
import numpy as np
import preprocess as pre
from stocks import tickers
cols = ["Date", "Open", "High", "Low", "Close", "Volume", "Name"]
names = tickers
def detrend():
df = pre.preprocess()
# detrends each stock time series using the difference method
for n in names:
df[n] = df[n].diff()
return df
if __name__ == "__main__":
detrend()
```
#### File: 1kc2/Minimal-Correlation-Portfolio/plot_stocks_timeseries.py
```python
import pandas as pd
import numpy as np
import preprocess as pre
import matplotlib.pyplot as plt
from matplotlib.dates import AutoDateFormatter, AutoDateLocator
import seaborn as sns
def plot_stocks_timeseries():
df = pre.preprocess()
fig = plt.figure()
ax = fig.add_subplot(111)
xtick_locator = AutoDateLocator()
xtick_formatter = AutoDateFormatter(xtick_locator)
# the two stocks to compare
df["AAPL"].plot()
df["AMZN"].plot()
ax.xaxis.set_major_locator(xtick_locator)
ax.xaxis.set_major_formatter(xtick_formatter)
plt.show()
if __name__ == "__main__":
plot_stocks_timeseries()
```
#### File: 1kc2/Minimal-Correlation-Portfolio/stats.py
```python
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import detrend as dtr
import networkx as nx
import build_correlation_network as bnx
def stats():
# import the correlation network
H = bnx.build_network()
close_ct_d = nx.closeness_centrality(H, distance="weight")
between_ct_d = nx.betweenness_centrality(H, weight="weight")
degree_ct_d = nx.degree_centrality(H)
katz_ct_d = nx.katz_centrality(
H,
weight="weight",
alpha=1 / (max(nx.adjacency_spectrum(H)) + 1),
beta=close_ct_d,
)
degree_ct_s = pd.Series(degree_ct_d).round(3)
close_ct_s = pd.Series(close_ct_d).round(3)
between_ct_s = pd.Series(between_ct_d).round(3)
katz_ct_s = pd.Series(katz_ct_d).round(3).astype("float")
close_ct_s.reset_index()
degree_ct_s.reset_index()
between_ct_s.reset_index()
katz_ct_s.reset_index()
degree_ct_df = (
pd.DataFrame({"stock_rank_1": degree_ct_s.index, "degree": degree_ct_s.values})
.sort_values(by="degree", ascending=True)
.reset_index()
.drop("index", axis=1)
)
close_ct_df = (
pd.DataFrame({"stock_rank_2": close_ct_s.index, "closeness": close_ct_s.values})
.sort_values(by="closeness", ascending=True)
.reset_index()
.drop("index", axis=1)
)
between_ct_df = (
pd.DataFrame(
{"stock_rank_3": between_ct_s.index, "between": between_ct_s.values}
)
.sort_values(by="between", ascending=True)
.reset_index()
.drop("index", axis=1)
)
katz_ct_df = (
pd.DataFrame({"stock_rank_4": katz_ct_s.index, "katz": katz_ct_s.values})
.sort_values(by="katz", ascending=True)
.reset_index()
.drop("index", axis=1)
)
df1 = degree_ct_df.join(close_ct_df)
df2 = df1.join(between_ct_df)
ct_df = df2.join(katz_ct_df)
return print(ct_df)
if __name__ == "__main__":
stats()
```
#### File: 1kc2/Minimal-Correlation-Portfolio/timeseries_network.py
```python
import numpy as np
import pandas as pd
import networkx as nx
import dcor
def timeseries_correlation_network(data, corr_param = 'pcor', prune=0.35):
if corr_param == "dcor":
col_names = data.columns.tolist()
data_dcor = pd.DataFrame(index=col_names, columns=col_names)
k = 0
for i in col_names:
v_i = data.loc[:, i].values
for j in col_names[k:]:
v_j = data.loc[:, j].values
dcor_val = dcor.distance_correlation(v_i, v_j)
data_dcor.at[i, j] = dcor_val
data_dcor.at[j, i] = dcor_val
k += 1
# converts the dataframe to a matrix (need this to generate the graph from the networkx package)
dcor_matrix = data_dcor.values.astype("float")
sim_matrix = 1 - dcor_matrix
nodes = data_dcor.index.values
# transforms the similarity matrix into a weighted graph
G = nx.from_numpy_matrix(sim_matrix)
# relabel nodes as the stock names
G = nx.relabel_nodes(G, lambda x: nodes[x])
# prints the edges with their corresponding weights
G.edges(data=True)
# copy correlation network
H = G.copy()
# remove self-edges from H (required for graph-theoretic analyses)
for (u, v) in G.edges:
if u == v:
H.remove_edge(u,v)
if prune != None:
# removes weakly correlated edges from H
for (u, v, wt) in G.edges.data("weight"):
if wt >= 1 - prune:
H.remove_edge(u, v)
return H
if corr_param == "pcor":
pcor_matrix = data.iloc[:, 1:].corr()
nodes = pcor_matrix.index.values
pcor_matrix = np.asmatrix(pcor_matrix)
sim_matrix = 1 - abs(pcor_matrix)
G = nx.from_numpy_matrix(sim_matrix)
G = nx.relabel_nodes(G, lambda x: nodes[x])
G.edges(data=True)
H = G.copy()
for (u, v) in G.edges:
if u == v:
H.remove_edge(u,v)
if prune != None:
for (u, v, wt) in G.edges.data("weight"):
if wt >= 1 - prune:
H.remove_edge(u, v)
return H
``` |
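A minimal sketch for `timeseries_correlation_network` using synthetic data; in practice `data` would be a DataFrame of (detrended) price series with one column per asset. With pure noise most edges fall below the pruning threshold, so the pruned graph is typically close to empty:
```python
# Synthetic example data; real input comes from the preprocessing/detrending steps.
import numpy as np
import pandas as pd
from timeseries_network import timeseries_correlation_network
rng = np.random.default_rng(0)
data = pd.DataFrame(rng.normal(size=(250, 4)), columns=["A", "B", "C", "D"])
H = timeseries_correlation_network(data, corr_param="dcor", prune=0.35)
print(H.edges(data=True))
G_full = timeseries_correlation_network(data, corr_param="dcor", prune=None)  # keep all edges
```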
{
"source": "1kc2/tetrisAI",
"score": 3
} |
#### File: tetrisAI/src/exceptions.py
```python
class UnknownInstructionException(Exception):
pass
class BlockLimitException(Exception):
pass
class NoBlockException(Exception):
def __init__(self):
super().__init__("This board has no block to manipulate.")
```
#### File: tetrisAI/src/visual-pygame.py
```python
from adversary import RandomAdversary
from arguments import parser
from board import Board, Direction, Rotation
from constants import BOARD_WIDTH, BOARD_HEIGHT, DEFAULT_SEED, INTERVAL
from player import Player, SelectedPlayer
import pygame
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
CELL_WIDTH = 20
CELL_HEIGHT = 20
EVENT_FORCE_DOWN = pygame.USEREVENT + 1
FRAMES_PER_SECOND = 60
class Square(pygame.sprite.Sprite):
def __init__(self, color, x, y):
super().__init__()
self.image = pygame.Surface([CELL_WIDTH, CELL_HEIGHT])
self.image.fill(color)
self.rect = self.image.get_rect()
self.rect.x = x * CELL_WIDTH
self.rect.y = y * CELL_HEIGHT
def render(screen, board):
screen.fill(BLACK)
sprites = pygame.sprite.Group()
# Add the cells already on the board for drawing.
for (x, y) in board:
sprites.add(Square(pygame.Color(board.cellcolor[x, y]), x, y))
if board.falling is not None:
# Add the cells of the falling block for drawing.
for (x, y) in board.falling:
sprites.add(Square(pygame.Color(board.falling.color), x, y))
if board.next is not None:
for (x, y) in board.next:
sprites.add(
Square(
pygame.Color(board.next.color),
x + board.width + 2,
y+1
)
)
sprites.draw(screen)
pygame.draw.line(
screen,
BLUE,
(board.width * CELL_WIDTH + 2, 0),
(board.width * CELL_WIDTH + 2, board.height * CELL_HEIGHT)
)
# Update window title with score.
pygame.display.set_caption(f'Score: {board.score}')
class UserPlayer(Player):
"""
A simple user player that reads moves from the command line.
"""
key_to_move = {
pygame.K_RIGHT: Direction.Right,
pygame.K_LEFT: Direction.Left,
pygame.K_DOWN: Direction.Down,
pygame.K_SPACE: Direction.Drop,
pygame.K_UP: Rotation.Clockwise,
pygame.K_z: Rotation.Anticlockwise,
pygame.K_x: Rotation.Clockwise,
}
def choose_action(self, board):
while True:
event = pygame.event.wait()
if event.type == pygame.QUIT:
raise SystemExit
elif event.type == pygame.KEYUP:
if event.key in self.key_to_move:
return self.key_to_move[event.key]
elif event.key == pygame.K_ESCAPE:
raise SystemExit
elif event.type == EVENT_FORCE_DOWN:
return None
def check_stop():
for event in pygame.event.get():
if event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE:
raise SystemExit
elif event.type == pygame.QUIT:
raise SystemExit
def run():
board = Board(BOARD_WIDTH, BOARD_HEIGHT)
adversary = RandomAdversary(DEFAULT_SEED)
args = parser.parse_args()
if args.manual:
player = UserPlayer()
else:
player = SelectedPlayer()
pygame.init()
screen = pygame.display.set_mode([
(BOARD_WIDTH + 6) * CELL_WIDTH,
BOARD_HEIGHT * CELL_HEIGHT
])
clock = pygame.time.Clock()
# Set timer to force block down when no input is given.
pygame.time.set_timer(EVENT_FORCE_DOWN, INTERVAL)
for move in board.run(player, adversary):
render(screen, board)
pygame.display.flip()
# If we are not playing manually, clear the events.
if not args.manual:
check_stop()
clock.tick(FRAMES_PER_SECOND)
while True:
check_stop()
if __name__ == '__main__':
run()
``` |
{
"source": "1kc/trinity",
"score": 2
} |
#### File: trinity/tests-trio/conftest.py
```python
from pathlib import Path
import secrets
import tempfile
import uuid
from eth_keys.datatypes import PrivateKey
from lahja import ConnectionConfig
from lahja.trio.endpoint import TrioEndpoint
import pytest
import trio
from eth2.beacon.chains.testnet import SkeletonLakeChain
from eth2.beacon.state_machines.forks.skeleton_lake.config import (
MINIMAL_SERENITY_CONFIG,
)
from eth2.beacon.tools.misc.ssz_vector import override_lengths
from eth2.beacon.types.states import BeaconState
from trinity.config import BeaconChainConfig
# Fixtures below are copied from https://github.com/ethereum/lahja/blob/f0b7ead13298de82c02ed92cfb2d32a8bc00b42a/tests/core/trio/conftest.py # noqa: E501
@pytest.fixture
def ipc_base_path():
with tempfile.TemporaryDirectory() as temp_dir:
yield Path(temp_dir)
def generate_unique_name() -> str:
# We use unique names to avoid clashing of IPC pipes
return str(uuid.uuid4())
@pytest.fixture
def endpoint_server_config(ipc_base_path):
config = ConnectionConfig.from_name(generate_unique_name(), base_path=ipc_base_path)
return config
@pytest.fixture
async def endpoint_server(endpoint_server_config):
async with TrioEndpoint.serve(endpoint_server_config) as endpoint:
yield endpoint
@pytest.fixture
async def endpoint_client(endpoint_server_config, endpoint_server):
async with TrioEndpoint("client-for-testing").run() as client:
await client.connect_to_endpoints(endpoint_server_config)
while not endpoint_server.is_connected_to("client-for-testing"):
await trio.sleep(0)
yield client
@pytest.fixture
def node_key():
key_bytes = secrets.token_bytes(32)
return PrivateKey(key_bytes)
@pytest.fixture
def eth2_config():
config = MINIMAL_SERENITY_CONFIG
# NOTE: have to ``override_lengths`` before we can parse ssz objects, like the BeaconState
override_lengths(config)
return config
@pytest.fixture
async def current_time():
return trio.current_time()
@pytest.fixture
def genesis_time(current_time, eth2_config):
slots_after_genesis = 10
return int(current_time - slots_after_genesis * eth2_config.SECONDS_PER_SLOT)
@pytest.fixture
def genesis_state(eth2_config, genesis_time):
state = BeaconState.create(config=eth2_config)
state.genesis_time = genesis_time
return state
@pytest.fixture
def chain_config(genesis_state, eth2_config):
return BeaconChainConfig(genesis_state, eth2_config, {})
@pytest.fixture
def database_dir(tmp_path):
return tmp_path
@pytest.fixture
def chain_class():
return SkeletonLakeChain
@pytest.fixture
def get_trio_time():
def _f():
return trio.current_time()
return _f
```
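A hypothetical test module placed next to this `conftest.py`, exercising only the fixtures defined above (pytest-trio is assumed, as in the existing test suite):
```python
# Sketch of a test using the fixtures above; the arguments are fixture names, not modules.
import pytest
@pytest.mark.trio
async def test_genesis_state_carries_configured_genesis_time(genesis_state, genesis_time):
    assert genesis_state.genesis_time == genesis_time
```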
#### File: nodes/beacon/test_full.py
```python
import pytest
import trio
from trinity._utils.version import construct_trinity_client_identifier
from trinity.nodes.beacon.full import BeaconNode
@pytest.mark.trio
async def test_beacon_node_can_count_slots(
autojump_clock,
node_key,
eth2_config,
chain_config,
database_dir,
chain_class,
get_trio_time,
):
validator_api_port = 0
client_id = construct_trinity_client_identifier()
node = BeaconNode(
node_key,
eth2_config,
chain_config,
database_dir,
chain_class,
validator_api_port,
client_id,
get_trio_time,
)
some_slots = 10
a_future_slot = node.current_tick.slot + some_slots
seconds = some_slots * eth2_config.SECONDS_PER_SLOT
with trio.move_on_after(seconds):
await node.run()
assert node.current_tick.slot == a_future_slot
```
#### File: trinity/extensibility/trio.py
```python
from abc import abstractmethod
import asyncio
from multiprocessing import Process
import os
import signal
import trio
from async_service import background_trio_service
from asyncio_run_in_process.constants import SIGINT_TIMEOUT_SECONDS, SIGTERM_TIMEOUT_SECONDS
from lahja import EndpointAPI
from trinity._utils.logging import child_process_logging, get_logger
from trinity._utils.mp import ctx
from trinity._utils.profiling import profiler
from trinity.boot_info import BootInfo
from .component import BaseComponent, BaseIsolatedComponent
from .event_bus import TrioEventBusService
class TrioComponent(BaseComponent):
"""
``TrioComponent`` is a component that executes in-process
with the ``trio`` concurrency library.
"""
pass
class TrioIsolatedComponent(BaseIsolatedComponent):
logger = get_logger('trinity.extensibility.TrioIsolatedComponent')
async def run(self) -> None:
"""
Call chain is:
- multiprocessing.Process -> _run_process
* isolates to a new process
- _run_process -> run_process
* sets up subprocess logging
- run_process -> _do_run
* runs the event loop and transitions into async context
- _do_run -> do_run
* sets up event bus and then enters user function.
"""
# FIXME: Use subprocess.Popen() so that we can make every process be its own process group
# leader, like in AsyncioIsolatedComponent.
process = ctx.Process(
target=self.run_process,
args=(self._boot_info,),
)
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, process.start)
try:
await loop.run_in_executor(None, process.join)
finally:
# NOTE: Since the subprocess we start here runs in the same process group as us (see
# comment above as to why), when we get a Ctrl-C in the terminal the subprocess will
# get it as well, so we first wait a fraction of SIGINT_TIMEOUT_SECONDS for it to
# terminate (which is usually enough for trio to terminate all pending tasks), and
# only if it hasn't finished by then we send it a SIGINT. If we send a SIGINT straight
# away we cause trio to crash when we've been Ctrl-C'ed (because it would get two
# SIGINTs), and if we don't send one at all the subprocess never terminates when
# trinity exits because of a crash.
self.logger.debug("Waiting for process %d to terminate", process.pid)
await loop.run_in_executor(None, process.join, SIGINT_TIMEOUT_SECONDS / 4)
if process.is_alive():
self.logger.debug("Process %d did not terminate, sending SIGINT", process.pid)
self._send_signal_and_join(process, signal.SIGINT, SIGINT_TIMEOUT_SECONDS)
if process.is_alive():
self.logger.debug("Process %d did not terminate, sending SIGTERM", process.pid)
self._send_signal_and_join(process, signal.SIGTERM, SIGTERM_TIMEOUT_SECONDS)
def _send_signal_and_join(self, process: Process, sig: int, timeout: int) -> None:
try:
os.kill(process.pid, sig)
except ProcessLookupError:
self.logger.debug("Process %d has already terminated", process.pid)
return
# XXX: Using process.join() here is far from ideal as it blocks the main process,
# forcing us to wait in sequence for every trio components, but try and run it
# asynchronously (using an executor, like above) and you'll get a
# RuntimeError: cannot reuse already awaited coroutine
process.join(timeout)
self.logger.debug(
"process (%d) .join() returned, exitcode=%s", process.pid, process.exitcode)
@classmethod
def run_process(cls, boot_info: BootInfo) -> None:
with child_process_logging(boot_info):
if boot_info.profile:
with profiler(f'profile_{cls.get_endpoint_name()}'):
trio.run(cls._do_run, boot_info)
else:
trio.run(cls._do_run, boot_info)
@classmethod
async def _do_run(cls, boot_info: BootInfo) -> None:
event_bus_service = TrioEventBusService(
boot_info.trinity_config,
cls.get_endpoint_name(),
)
with trio.open_signal_receiver(signal.SIGINT, signal.SIGTERM) as signal_aiter:
async with background_trio_service(event_bus_service):
event_bus = await event_bus_service.get_event_bus()
async with trio.open_nursery() as nursery:
nursery.start_soon(cls.do_run, boot_info, event_bus)
async for sig in signal_aiter:
nursery.cancel_scope.cancel()
@classmethod
@abstractmethod
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
"""
This is where subclasses should override
"""
...
```
#### File: trinity/tools/_component_isolation.py
```python
import logging
from pathlib import Path
import subprocess
import tempfile
from async_service import Service, background_asyncio_service, background_trio_service
from asyncio_run_in_process.typing import SubprocessKwargs
from eth_utils.toolz import merge
from lahja import EndpointAPI, BaseEvent
from trinity.boot_info import BootInfo
from trinity.extensibility import AsyncioIsolatedComponent, TrioIsolatedComponent
class IsStarted(BaseEvent):
def __init__(self, path: Path) -> None:
self.path = path
class ComponentTestService(Service):
logger = logging.getLogger('trinity.testing.ServiceForTest')
def __init__(self, event_bus: EndpointAPI) -> None:
self.event_bus = event_bus
async def run(self) -> None:
path = Path(tempfile.NamedTemporaryFile().name)
self.logger.debug('Broadcasting `IsStarted(%s)`', path)
try:
await self.event_bus.broadcast(IsStarted(path))
self.logger.debug('Waiting for cancellation')
await self.manager.wait_finished()
except BaseException as err:
self.logger.debug('Exiting due to error: `%r`', err)
finally:
self.logger.debug('Got cancellation: touching `%s`', path)
path.touch()
class AsyncioComponentForTest(AsyncioIsolatedComponent):
name = "component-test"
endpoint_name = 'component-test'
logger = logging.getLogger('trinity.testing.ComponentForTest')
def get_subprocess_kwargs(self) -> SubprocessKwargs:
return merge(
super().get_subprocess_kwargs(),
{
'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
)
@property
def is_enabled(self) -> bool:
return True
@classmethod
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
cls.logger.debug('Entered `do_run`')
service = ComponentTestService(event_bus)
try:
async with background_asyncio_service(service) as manager:
cls.logger.debug('Running service')
try:
await manager.wait_finished()
finally:
cls.logger.debug('Exiting `do_run`')
finally:
# XXX: We never reach this line, so if you run test_isolated_component.py by itself it
# will pass but hang forever after pytest reports success.
# Figuring this out is probably the key to fixing our shutdown.
cls.logger.debug('Finished: `do_run`')
class TrioComponentForTest(TrioIsolatedComponent):
name = "component-test-trio"
endpoint_name = 'component-test-trio'
@property
def is_enabled(self) -> bool:
return True
@classmethod
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
cls.logger.debug('Entered `do_run`')
service = ComponentTestService(event_bus)
try:
async with background_trio_service(service) as manager:
cls.logger.debug('Running service')
try:
await manager.wait_finished()
finally:
cls.logger.debug('Exiting `do_run`')
finally:
cls.logger.debug('Finished: `do_run`')
``` |
{
"source": "1k-ct/send-cost",
"score": 2
} |
#### File: send-cost/aws_fee_to_line/parse_cost.py
```python
from dotenv import load_dotenv
import os
import datetime
# import dateutil
import time
# from dateutil.tz import tzlocal
import boto3
import requests
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
client = boto3.client('cloudwatch', region_name='us-east-1')
# 0.22sec
def parse_services():
response = client.list_metrics(
Namespace="AWS/Billing", MetricName="EstimatedCharges"
)
services = []
for iRes in response["Metrics"]:
value = iRes["Dimensions"][0]["Value"]
if iRes["Dimensions"][0]["Name"] == "ServiceName":
services.append(value)
return services
def parse_total_service_billing():
response = client.get_metric_statistics(
Namespace='AWS/Billing',
MetricName='EstimatedCharges',
Dimensions=[
{
'Name': 'Currency',
'Value': 'USD'
}
],
StartTime=datetime.datetime.today() - datetime.timedelta(days=1),
EndTime=datetime.datetime.today(),
Period=86400,
Statistics=['Maximum'])
return response
# AWSDataTransfer 0.0$
# awskms 1.07$
# AmazonEC2 0.0$
# AWSELB 0.0$
# AWSCloudTrail 0.0$
# AmazonRDS 0.21$
# AmazonS3 0.01$
# AWSMarketplace 0.0$
# AmazonCloudWatch 0.0$
# AWSSecretsManager 0.0$
# AmazonRoute53
def parse_service_billing(service_name):
start = time.time()
if service_name == "Total":
return
response = client.get_metric_statistics(
Namespace='AWS/Billing',
MetricName='EstimatedCharges',
Dimensions=[
{
'Name': 'Currency',
'Value': 'USD'
}, {
'Name': 'ServiceName',
"Value": service_name,
}
],
StartTime=datetime.datetime.today() - datetime.timedelta(days=1),
EndTime=datetime.datetime.today(),
Period=86400,
Statistics=['Maximum'])
elapsed_time = time.time() - start
return response
# print(parse_service_billing("AWSDataTransfer"))
# LINEPOSTURL = os.environ['LINEPostURL']
# LINETOKEN = os.environ['LINEtoken']
# # TODO: delete this later
load_dotenv()
LINEPOSTURL = os.getenv("LINEPostURL")
LINETOKEN = os.getenv("LINEtoken")
headers = {"Authorization": "Bearer " + LINETOKEN}
def make_payload():
metric_statistics = parse_total_service_billing()
cost = metric_statistics['Datapoints'][0]['Maximum']
date = metric_statistics['Datapoints'][0]['Timestamp'].strftime(
'%Y年%m月%d日')
message = "\n" + date
services = parse_services()
for service in services:
response = parse_service_billing(service)
value = str(response["Datapoints"][0]["Maximum"])
message += "\n"+value+"$"+" "+service
message += "\n合計"+str(cost)+"$"
return message
def lambda_handler(event, context):
message = make_payload()
payload = {"message": message}
try:
req = requests.post(LINEPOSTURL, headers=headers, params=payload)
except requests.exceptions.RequestException as e:
logger.error("Request failed: %s", e)
# TODO: delete this later
# lambda_handler("", "")
``` |
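`make_payload()` above only reads the `Maximum` and `Timestamp` keys of each CloudWatch answer. Trimmed down to those keys, a response it can consume looks roughly like this (values invented for illustration):
```python
import datetime
# Hedged illustration of the minimal shape the code above relies on.
fake_response = {
    'Datapoints': [
        {'Timestamp': datetime.datetime(2021, 6, 1), 'Maximum': 1.29},
    ]
}
print(fake_response['Datapoints'][0]['Maximum'])                         # 1.29
print(fake_response['Datapoints'][0]['Timestamp'].strftime('%Y-%m-%d'))  # 2021-06-01
```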
{
"source": "1King-coder/Developing-Some-Python-Skills",
"score": 3
} |
#### File: Singleton/Borg/Monostate_Borg_2.py
```python
class StrReprMixin:
def __str__(self):
params = ', '.join(
[f'{k}={v}' for k, v in self.__dict__.items()]
)
return f'{self.__class__.__name__} ({params})'
def __repr__(self):
return self.__str__()
class A(StrReprMixin):
def __init__(self, nome):
self.x = 10
self.y = 20
self.nome = nome
class MonoState(StrReprMixin):
_state: dict = {
'x': 200,
'y': 900,
'nome': 'Vitor'
}
def __new__(cls, *args, **kwargs):
obj = super().__new__(cls)
obj.__dict__ = cls._state
return obj
def __init__(self, nome=None):
if nome is not None:
self.nome = nome
class B(MonoState):
pass
if __name__ == '__main__':
m1 = MonoState('Vitor')
print(m1)
m2 = MonoState()
print(m1)
b = B()
print(b)
``` |
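A quick way to see what the Borg/monostate above buys you: every instance shares one `__dict__`, so an attribute written through any instance, including the subclass `B`, is visible through all of them. A small sketch reusing the classes defined above:
```python
# Assumes MonoState and B from Monostate_Borg_2.py are importable.
a = MonoState()
b = B()
b.nome = 'Maria'                  # write through the subclass instance...
assert a.nome == 'Maria'          # ...and every other instance sees it,
assert a.__dict__ is b.__dict__   # because they literally share one dict.
```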
{
"source": "1King-coder/Graphs-creation-for-socfilhist-project",
"score": 3
} |
#### File: modules/Data_Classes/Answers.py
```python
from abc import ABC
class Answers(ABC):
def __init__ (self, answers_obj) -> None:
self.answers: list = list(answers_obj.items())[1:]
@property
def total_answers(self) -> dict:
total_option_1, total_option_2, total_option_3 = [0, 0, 0]
for _, value in self.answers:
if (value == 1):
total_option_1 += 1
if (value == 2):
total_option_2 += 1
if (value == 3):
total_option_3 += 1
return {
'total_option_1': total_option_1,
'total_option_2': total_option_2,
'total_option_3': total_option_3
}
@property
def predominant_options(self) -> str:
total_options: dict = {
"option 1": self.total_answers['total_option_1'],
"option 2": self.total_answers['total_option_2'],
"option 3": self.total_answers['total_option_3']
}
predominant_options: str = ''
major: int = 0
for option, total in total_options.items():
if total > major:
major = total
predominant_options = option
elif total == major:
predominant_options += f', {option}'
return predominant_options
```
#### File: Graphs-creation-for-socfilhist-project/modules/log.py
```python
from datetime import datetime
def log(function):
def wrapper():
with open('./Logs/log.txt', 'a') as log:
log.writelines(
f'Function {function.__name__} initialized at'
f' {datetime.now()} \n'
)
return function
return wrapper()
``` |
{
"source": "1King-coder/LoginSistemWithMySQL",
"score": 3
} |
#### File: Login_sys/Data_base/Data_base.py
```python
import pymysql.cursors
from abc import ABC, abstractmethod
import hashlib
import re
class DataBase(ABC):
"""
Base class for DataBases
"""
@abstractmethod
def register_new_user(self, username, password, email): pass
@abstractmethod
def verify_if_user_is_registered(self, username): pass
@abstractmethod
def get_user_id(self, username): pass
@abstractmethod
def get_username(self, id_): pass
@abstractmethod
def get_email(self, id_): pass
@abstractmethod
def confirm_login(self, username, password, email): pass
@abstractmethod
def list_registered_users(self): pass
@abstractmethod
def delete_user_from_database(self, username, email): pass
@abstractmethod
def change_password(self, password, new_password): pass
@abstractmethod
def change_username(self, username, new_username): pass
class Usuarios(DataBase):
"""
Usuarios Database class.
"""
_state: dict = {
# Establish a unique connection with the Database
'conection': pymysql.connect(
host='localhost',
user='root',
password='Password',
db='users',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
}
def __new__(cls, *args, **kwargs):
obj = super().__new__(cls)
obj.__dict__ = cls._state
return obj
def __init__(self):
self.conection = self._state['conection']
self.cursor = self.conection.cursor()
def register_new_user(self, username, email, password):
"""
Insert a new user in the
DB table.
"""
enc_pass = hashlib.sha256(password.strip().encode('utf-8')).hexdigest()
self.cursor.execute('INSERT INTO users.users_login (username,'
' email, password_hash) VALUES'
'(%s, %s, %s)', [username, email, enc_pass])
self.conection.commit()
def verify_if_user_is_registered(self, username):
"""
Verify if the user's username
is already in the DB table.
"""
self.cursor.execute('SELECT username FROM users.users_login'
f" WHERE username='{username}'")
result = self.cursor.fetchall()
return result
def get_user_id(self, user_input):
"""
Get the user id by his username
or e-mail.
"""
if re.search(r'@.*\.com', user_input):
self.cursor.execute('SELECT id FROM users.users_login'
f" WHERE email='{user_input}'")
return self.cursor.fetchall()[0]['id']
self.cursor.execute('SELECT id FROM users.users_login'
f" WHERE username='{user_input}'")
result = self.cursor.fetchall()
return result[0]['id'] if result else None
def get_username(self, user_input):
"""
Get user's username by his id.
"""
user_id = self.get_user_id(user_input)
self.cursor.execute('SELECT username FROM users.users_login'
f" WHERE id='{user_id}'")
return self.cursor.fetchall()[0]['username']
def get_email(self, user_input):
"""
Get user's e-mail by his id.
"""
user_id = self.get_user_id(user_input)
self.cursor.execute('SELECT email FROM users.users_login'
f" WHERE id='{user_id}'")
return self.cursor.fetchall()[0]['email']
def confirm_login(self, user_input, password):
"""
Makes the user's login input
validation for logging in the
system.
"""
user_id = self.get_user_id(user_input)
if not user_id:
return None
enc_pass = hashlib.sha256(password.strip().encode('utf-8')).hexdigest()
self.cursor.execute('SELECT password_hash FROM users.users_login'
f" WHERE id='{user_id}'")
confirmation = enc_pass == self.cursor.fetchall()[0]['password_hash']
return 'Confirm' if confirmation else 'Invalid password'
def list_registered_users(self):
self.cursor.execute('Select * FROM users.users_login')
return self.cursor.fetchall()
def delete_user_from_database(self, user_input):
"""
Delete a user in Database's Table
using his id.
"""
try:
user_id = int(user_input)
except Exception:
user_id = self.get_user_id(user_input)
if not user_id:
return None
self.cursor.execute('DELETE FROM users.users_login '
f"WHERE id='{user_id}'")
self.conection.commit()
return True
def change_password(self, user_input, new_password):
"""
Change user's password in Database's
Table using his id.
"""
user_id = self.get_user_id(user_input)
if not user_id:
return None
new_enc_pass = hashlib.sha256(
            new_password.strip().encode('utf-8')).hexdigest()
self.cursor.execute('UPDATE users.users_login SET '
f"password_hash='{<PASSWORD>_enc_pass}' "
f"WHERE id='{user_id}'")
self.conection.commit()
return True
def change_username(self, new_username, email):
"""
Change user's username in Database's
Table using his id.
"""
user_id = self.get_user_id(email)
if not user_id:
return None
self.cursor.execute('UPDATE users.users_login SET '
f"username='{new_username}' WHERE id='{user_id}'")
self.conection.commit()
return True
```
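A hedged usage sketch of the `Usuarios` class above, assuming a reachable MySQL server matching the hard-coded `_state` connection parameters and an existing `users.users_login` table (the import path mirrors the file layout shown in the headers):
```python
from Login_sys.Data_base.Data_base import Usuarios  # assumed import path
db = Usuarios()   # all instances share the single pymysql connection in _state
if not db.verify_if_user_is_registered('alice'):
    db.register_new_user('alice', 'alice@example.com', 'secret123')
# Returns 'Confirm', 'Invalid password', or None when the user is unknown.
print(db.confirm_login('alice', 'secret123'))
print(db.get_email('alice'))
```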
#### File: LoginSistemWithMySQL/Login_sys/RequestNewPassword.py
```python
from .SendEmail import SendEmail
from .Data_base.Data_base import Usuarios
from PyQt5.QtWidgets import QMainWindow, QApplication
from .Verifications_and_Responses.Verifications import Verifications
from .Verifications_and_Responses.Responses import Responses
from .RequestConfirmationCode import Request_Confirmation_Code
from .SendCode.GenerateAuthCode import Get_Auth_Code
from .SendCode.Cache import Cache
from .GraphicGui.RequestNewPasswordInterface import *
class Request_New_Password(QMainWindow, Ui_MainWindow):
"""
Class responsible for sending the authentication
code to access the recovery password window
where the user will be able to change his password.
"""
def __init__(self, parent=None):
super().__init__(parent)
super().setupUi(self)
self.setWindowTitle('Request a New Password')
self.Request_Confirmation_Code = Request_Confirmation_Code(
'Recovery Password')
self.users_db = Usuarios()
self.responses = Responses()
self.verify = Verifications()
self.Confirm.clicked.connect(self.confirm)
def get_user_input(self):
return [self.User_input.displayText()]
def confirm(self):
user_input = self.get_user_input()
if self.verify.empty_fields(user_input):
self.responses.raise_alert(self.Response,
'None of the fields can be empty.')
return
if self.verify.special_characters(user_input[0]):
self.responses.raise_alert(self.Response,
'There must not be special'
'characters. '
"(! # $ % ¨ & * + ')")
return
"""Try to get the user's input id."""
if not self.users_db.get_user_id(user_input[0]):
self.responses.raise_error(self.Response,
'Invalid Username or E-mail.')
return
"""Stores momentaly the user's input for Recovery Password."""
Cache(user_input[0])
"""Get the Auth code and send it by the user's email."""
SendEmail(Get_Auth_Code(), user_input[0]).send_email()
"""Open the Authentication window."""
self.Request_Confirmation_Code.show()
"""Clear the inputs and responser."""
self.responses.clear([self.User_input], self.Response)
self.close()
```
#### File: LoginSistemWithMySQL/Login_sys/RequestNewUsername.py
```python
from .Data_base.Data_base import Usuarios
from .GraphicGui.RequestNewUsernameInterface import *
from PyQt5.QtWidgets import QMainWindow, QApplication
from .Verifications_and_Responses.Verifications import Verifications
from .Verifications_and_Responses.Responses import Responses
from .SendEmail import SendEmail
from .SendCode.Cache import Cache
from .SendCode.GenerateAuthCode import Get_Auth_Code
from .RequestConfirmationCode import Request_Confirmation_Code
class Request_New_Username(QMainWindow, Ui_MainWindow):
"""
Class responsible for sending the authentication
code to access the recovery username window
where the user will be able to change his username.
"""
def __init__(self, parent=None):
super().__init__(parent)
super().setupUi(self)
self.setWindowTitle('Request a New Username')
self.Request_Confirmation_Code = Request_Confirmation_Code(
'Recovery Username'
)
self.users_db = Usuarios()
self.auth_code = Get_Auth_Code()
self.responses = Responses()
self.verify = Verifications()
self.Send_Email.clicked.connect(self.next_window)
def get_user_email(self) -> list:
return [self.Email.text().strip()]
def next_window(self):
email = self.get_user_email()
if self.verify.empty_fields(email):
self.responses.raise_alert(self.Response,
'None of the fields can be empty.')
return
if self.verify.special_characters(email[0]):
self.responses.raise_alert(self.Response,
'There must not be special'
'characters. '
"(! # $ % ¨ & * + ')")
return
if not self.users_db.get_user_id(email[0]):
            self.responses.raise_error(self.Response, 'Invalid E-mail.')
return
if not self.verify.is_email(email[0]):
self.responses.raise_error(self.Response, 'Invalid E-mail.')
return
"""Get the Auth code and send it by the user's email."""
SendEmail(self.auth_code, email[0]).send_email()
"""Open the Authentication window."""
self.Request_Confirmation_Code.show()
"""Clear the inputs and responser."""
self.responses.clear([self.Email], self.Response)
self.close()
```
#### File: Login_sys/SendCode/GenerateAuthCode.py
```python
from random import randint
def generate_auth_code() -> str:
"""
    Generate a six-digit random code.
"""
code = ''
for _ in range(6):
code += str(randint(0, 9))
return code
class Get_Auth_Code:
"""
Monostate for storing a generated code
"""
_state: dict = {
'AuthCode': generate_auth_code()
}
def __new__(cls, *args, **kwargs):
obj = super().__new__(cls)
obj.__dict__ = cls._state
return obj
def __init__(self, flag=None):
"""
Flag for saving a code for use
and after it, generate another.
"""
if flag is not None:
self._state['AuthCode'] = generate_auth_code()
def __str__(self):
"""
Return the generated code when
the class is called as a string
"""
return self._state['AuthCode']
```
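Because `Get_Auth_Code` is a monostate as well, every instance renders the same six-digit code until one is constructed with a non-None flag, which rotates the stored code. A short sketch, assuming the class above is importable:
```python
# Assumes Get_Auth_Code from GenerateAuthCode.py is importable.
code_a = str(Get_Auth_Code())
code_b = str(Get_Auth_Code())
assert code_a == code_b        # shared _state, so the same code everywhere
Get_Auth_Code(flag=True)       # any non-None flag stores a freshly generated code
code_c = str(Get_Auth_Code())  # new six-digit string (it could, rarely, repeat,
                               # since the digits are random)
```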
#### File: Login_sys/Verifications_and_Responses/Verifications.py
```python
from re import search
class Verifications:
"""
Makes verifications into
the inputs fields.
"""
@staticmethod
def empty_fields(fields) -> bool:
for field in fields:
if field.strip() == '':
return True
return False
@staticmethod
def is_email(input_) -> bool:
if search(r'@.*\.com', input_):
return True
else:
return False
@staticmethod
def special_characters(inputs) -> bool:
for input_ in inputs:
if search(r'.*!.*|.*#.*|.*\$.*|.*%.*'
r'|.*¨.*|.*&.*|.*\*.*|.*\+.*|.*".*|.*\'.*',
input_):
return True
return False
``` |
{
"source": "1King-coder/PythonEmailSender",
"score": 3
} |
#### File: PythonEmailSender/SendRoot/SendEmails.py
```python
from string import Template
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.image import MIMEImage
from email import encoders
import os
import mimetypes
def template(template, file_name, p_name):
"""
Takes template and set html variables values, setting E-mail body.
"""
with open(f'{template}', 'r') as html:
temp = Template(html.read())
body = temp.substitute(trabalho=file_name, nome=p_name)
return body
def get_path(file_name, path):
"""
Gets file's path.
"""
path_list = []
if not file_name:
return None
for r, d, fs in os.walk(path):
for f in fs:
if file_name in f:
f_nome, ext = os.path.splitext(f)
cp_path = os.path.join(r, f)
path_list.append((f_nome, ext, cp_path))
return path_list
# Configure attachments.
def anexo(f_name, ext, cp_path):
"""
Returns attachment MIME.
"""
try:
        ctype, encoding = mimetypes.guess_type(f_name)
        if ctype is None or encoding is not None:
ctype = 'application/octet-stream'
maint, subt = ctype.split('/', 1)
if maint == 'image':
with open(f'{f_name}{ext}', 'rb') as img:
mime = MIMEImage(img.read(), _subtype='jpg')
else:
with open(f'{cp_path}', 'rb') as f:
mime = MIMEBase(maint, subt)
mime.set_payload(f.read())
encoders.encode_base64(mime)
mime.add_header('Content-Disposition', 'attachment',
filename=f'{f_name}{ext}')
return mime
except Exception as e:
print(f'Error: {e}')
raise e
def set_template(template_name, file_name, p_name):
"""
Insert template into the E-mail.
"""
corp = template(template_name, file_name, p_name)
msg = MIMEMultipart()
msg['from'] = p_name
msg['subject'] = f'{file_name}'
msg.attach(MIMEText(corp, 'html'))
return msg
``` |
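The `template()` helper above fills a `string.Template` HTML file, so the file must contain `$trabalho` and `$nome` placeholders. A minimal illustration with a made-up one-line template:
```python
from string import Template
body = Template('<p>Attached: $trabalho, sent by $nome.</p>')  # hypothetical template.html
print(body.substitute(trabalho='Final Report', nome='Maria'))
# -> <p>Attached: Final Report, sent by Maria.</p>
```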
{
"source": "1kko/ezDHLoader",
"score": 3
} |
#### File: 1kko/ezDHLoader/ezDHLoader.py
```python
import PySimpleGUI as sg
import pafy
from urllib.parse import urlparse, parse_qs
import time
import random
import configparser
import os
def output_callback(total, recvd, ratio, rate, eta):
global progress_bar
try:
# print(recvd, ratio, eta)
progress_bar.UpdateBar(ratio)
etaText = window['eta']
etaText.update("ETA: " + str(eta) + "s")
except:
pass
if __name__ == "__main__":
with_apikey = ""
try:
config = configparser.ConfigParser()
config.read('config.ini')
if config['DEFAULT']['YOUTUBE_API_KEY'] not in [None, '']:
pafy.set_api_key(config['DEFAULT']['YOUTUBE_API_KEY'])
with_apikey = " (API Key Enabled)"
except:
pass
layout = [
[sg.Text("Youtube URL:")],
[sg.InputText(size=(80, 20), key='url')],
[sg.Submit("OK"), sg.Cancel()],
[sg.ProgressBar(1, orientation='h', size=(
45, 5), key='progressbar'),
sg.Text(size=(12, 1), key='eta', justification='r')],
# [sg.Output(size=(80, 20))],
[sg.Text("Destination", size=(15, 1)), sg.InputText(os.getcwd(),
key='dstPath'), sg.FolderBrowse()],
]
window = sg.Window('ezDHLoader v0.6' + with_apikey, layout)
progress_bar = window['progressbar']
youtubeId = ""
event, values = window.read()
url = values['url']
try:
if url.startswith("http"):
res = urlparse(url)
if res.netloc == "www.youtube.com" or res.netloc == "youtube.com":
# Url starts with www.youtube.com
youtubeId = parse_qs(res.query)['v'][0]
if res.netloc == "youtu.be":
# Url starts with youtu.be
youtubeId = res.path[1:]
# download
y = pafy.new(youtubeId)
video = y.getbest()
vfilename = video.download(filepath=os.path.join(values['dstPath'], video.title+"."+video.extension),
quiet=True, callback=output_callback, remux_audio=True)
sg.Popup("Done")
except Exception as e:
sg.Popup("Oops", e)
``` |
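The video-id extraction above uses only the standard library; for reference, this is what those calls return for the two URL shapes handled (made-up id):
```python
from urllib.parse import urlparse, parse_qs
res = urlparse('https://www.youtube.com/watch?v=abc123defg45')
print(res.netloc)                    # 'www.youtube.com'
print(parse_qs(res.query)['v'][0])   # 'abc123defg45'
res = urlparse('https://youtu.be/abc123defg45')
print(res.netloc, res.path[1:])      # 'youtu.be' 'abc123defg45'
```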
{
"source": "1kko/python-holidays",
"score": 3
} |
#### File: holidays/countries/vietnam.py
```python
from datetime import date, datetime, timedelta
from dateutil.relativedelta import relativedelta as rd, FR, SA, MO
from holidays.constants import JAN, APR, MAY, SEP
from holidays.constants import SAT, SUN
from holidays.holiday_base import HolidayBase
class Vietnam(HolidayBase):
# https://publicholidays.vn/
# http://vbpl.vn/TW/Pages/vbpqen-toanvan.aspx?ItemID=11013 Article.115
# https://www.timeanddate.com/holidays/vietnam/
def __init__(self, **kwargs):
self.country = "VN"
HolidayBase.__init__(self, **kwargs)
def _populate(self, year):
# New Year's Day
name = "International New Year's Day"
first_date = date(year, JAN, 1)
self[first_date] = name
if self.observed:
self[first_date] = name
if first_date.weekday() == SAT:
self[first_date + rd(days=+2)] = name + " observed"
elif first_date.weekday() == SUN:
self[first_date + rd(days=+1)] = name + " observed"
# Lunar New Year
name = ["Vietnamese New Year", # index: 0
"The second day of Tet Holiday", # index: 1
"The third day of Tet Holiday", # index: 2
"The forth day of Tet Holiday", # index: 3
"The fifth day of Tet Holiday", # index: 4
"Vietnamese New Year's Eve", # index: -1
]
dt = self.get_solar_date(year, 1, 1)
new_year_date = date(dt.year, dt.month, dt.day)
if self.observed:
for i in range(-1, 5, 1):
tet_day = new_year_date + rd(days=+i)
self[tet_day] = name[i]
# Vietnamese Kings' Commemoration Day
# https://en.wikipedia.org/wiki/H%C3%B9ng_Kings%27_Festival
if year >= 2007:
name = "Hung Kings Commemoration Day"
dt = self.get_solar_date(year, 3, 10)
king_hung_date = date(dt.year, dt.month, dt.day)
self[king_hung_date] = name
else:
pass
# Liberation Day/Reunification Day
name = "Liberation Day/Reunification Day"
        liberation_date = date(year, APR, 30)
        self[liberation_date] = name
# International Labor Day
name = "International Labor Day"
labor_date = date(year, MAY, 1)
self[labor_date] = name
# Independence Day
name = "Independence Day"
independence_date = date(year, SEP, 2)
self[independence_date] = name
# Packed lunar-calendar data for the years 1901 to 2099. In each entry,
# bits 1 to 13 encode the length of the corresponding lunar month
# (1 means a 30-day month, 0 means a 29-day month; bit 13 is only used in
# leap years), and bits 16 to 19 encode which month is the leap month.
# A value of 0x0F there means the year has no leap month.
g_lunar_month_days = [
0xF0EA4, 0xF1D4A, 0x52C94, 0xF0C96, 0xF1536,
0x42AAC, 0xF0AD4, 0xF16B2, 0x22EA4, 0xF0EA4, # 1901-1910
0x6364A, 0xF164A, 0xF1496, 0x52956, 0xF055A,
0xF0AD6, 0x216D2, 0xF1B52, 0x73B24, 0xF1D24, # 1911-1920
0xF1A4A, 0x5349A, 0xF14AC, 0xF056C, 0x42B6A,
0xF0DA8, 0xF1D52, 0x23D24, 0xF1D24, 0x61A4C, # 1921-1930
0xF0A56, 0xF14AE, 0x5256C, 0xF16B4, 0xF0DA8,
0x31D92, 0xF0E92, 0x72D26, 0xF1526, 0xF0A56, # 1931-1940
0x614B6, 0xF155A, 0xF0AD4, 0x436AA, 0xF1748,
0xF1692, 0x23526, 0xF152A, 0x72A5A, 0xF0A6C, # 1941-1950
0xF155A, 0x52B54, 0xF0B64, 0xF1B4A, 0x33A94,
0xF1A94, 0x8152A, 0xF152E, 0xF0AAC, 0x6156A, # 1951-1960
0xF15AA, 0xF0DA4, 0x41D4A, 0xF1D4A, 0xF0C94,
0x3192E, 0xF1536, 0x72AB4, 0xF0AD4, 0xF16D2, # 1961-1970
0x52EA4, 0xF16A4, 0xF164A, 0x42C96, 0xF1496,
0x82956, 0xF055A, 0xF0ADA, 0x616D2, 0xF1B52, # 1971-1980
0xF1B24, 0x43A4A, 0xF1A4A, 0xA349A, 0xF14AC,
0xF056C, 0x60B6A, 0xF0DAA, 0xF1D92, 0x53D24, # 1981-1990
0xF1D24, 0xF1A4C, 0x314AC, 0xF14AE, 0x829AC,
0xF06B4, 0xF0DAA, 0x52D92, 0xF0E92, 0xF0D26, # 1991-2000
0x42A56, 0xF0A56, 0xF14B6, 0x22AB4, 0xF0AD4,
0x736AA, 0xF1748, 0xF1692, 0x53526, 0xF152A, # 2001-2010
0xF0A5A, 0x4155A, 0xF156A, 0x92B54, 0xF0BA4,
0xF1B4A, 0x63A94, 0xF1A94, 0xF192A, 0x42A5C, # 2011-2020
0xF0AAC, 0xF156A, 0x22B64, 0xF0DA4, 0x61D52,
0xF0E4A, 0xF0C96, 0x5192E, 0xF1956, 0xF0AB4, # 2021-2030
0x315AC, 0xF16D2, 0xB2EA4, 0xF16A4, 0xF164A,
0x63496, 0xF1496, 0xF0956, 0x50AB6, 0xF0B5A, # 2031-2040
0xF16D4, 0x236A4, 0xF1B24, 0x73A4A, 0xF1A4A,
0xF14AA, 0x5295A, 0xF096C, 0xF0B6A, 0x31B54, # 2041-2050
0xF1D92, 0x83D24, 0xF1D24, 0xF1A4C, 0x614AC,
0xF14AE, 0xF09AC, 0x40DAA, 0xF0EAA, 0xF0E92, # 2051-2060
0x31D26, 0xF0D26, 0x72A56, 0xF0A56, 0xF14B6,
0x52AB4, 0xF0AD4, 0xF16CA, 0x42E94, 0xF1694, # 2061-2070
0x8352A, 0xF152A, 0xF0A5A, 0x6155A, 0xF156A,
0xF0B54, 0x4174A, 0xF1B4A, 0xF1A94, 0x3392A, # 2071-2080
0xF192C, 0x7329C, 0xF0AAC, 0xF156A, 0x52B64,
0xF0DA4, 0xF1D4A, 0x41C94, 0xF0C96, 0x8192E, # 2081-2090
0xF0956, 0xF0AB6, 0x615AC, 0xF16D4, 0xF0EA4,
    0x42E4A, 0xF164A, 0xF1516, 0x22936, # 2091-2099
]
# Define range of years
START_YEAR, END_YEAR = 1901, 1900 + len(g_lunar_month_days)
# 1901 The 1st day of the 1st month of the Gregorian calendar is 1901/2/19
LUNAR_START_DATE, SOLAR_START_DATE = (1901, 1, 1), datetime(1901, 2, 19)
# The Gregorian date for December 30, 2099 is 2100/2/8
LUNAR_END_DATE, SOLAR_END_DATE = (2099, 12, 30), datetime(2100, 2, 18)
def get_leap_month(self, lunar_year):
return (self.g_lunar_month_days[lunar_year - self.START_YEAR] >> 16) \
& 0x0F
def lunar_month_days(self, lunar_year, lunar_month):
return 29 + ((self.g_lunar_month_days[lunar_year - self.START_YEAR] >>
lunar_month) & 0x01)
def lunar_year_days(self, year):
days = 0
months_day = self.g_lunar_month_days[year - self.START_YEAR]
for i in range(1, 13 if self.get_leap_month(year) == 0x0F else 14):
day = 29 + ((months_day >> i) & 0x01)
days += day
return days
# Calculate the Gregorian date according to the lunar calendar
def get_solar_date(self, year, month, day):
span_days = 0
for y in range(self.START_YEAR, year):
span_days += self.lunar_year_days(y)
leap_month = self.get_leap_month(year)
for m in range(1, month + (month > leap_month)):
span_days += self.lunar_month_days(year, m)
span_days += day - 1
return self.SOLAR_START_DATE + timedelta(span_days)
class VN(Vietnam):
pass
``` |
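The packed-integer scheme described in the comments above can be checked by hand: the low bits carry the month lengths and bits 16 to 19 carry the leap month (0x0F meaning none). A short decoding sketch for the first entry, lunar year 1901:
```python
entry = 0xF0EA4                        # g_lunar_month_days[0], i.e. lunar 1901
leap_month = (entry >> 16) & 0x0F      # 15 (0x0F): no leap month that year
month_days = [29 + ((entry >> m) & 0x01) for m in range(1, 13)]
print(leap_month)       # 15
print(month_days)       # [29, 30, 29, 29, 30, 29, 30, 29, 30, 30, 30, 29]
print(sum(month_days))  # 354, which is what lunar_year_days(1901) returns
```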
{
"source": "1Konny/idgan",
"score": 2
} |
#### File: idgan/data/preprocess.py
```python
import os
import sys
from PIL import Image
from pathlib import Path
from torchvision import transforms
def preprocess_celeba(path):
crop = transforms.CenterCrop((160, 160))
resample = Image.LANCZOS
img = Image.open(path)
img = crop(img)
img_256_path = celeba_256_dir / path.name
img.resize((256, 256), resample=resample).save(img_256_path)
img_128_path = celeba_128_dir / path.name
img.resize((128, 128), resample=resample).save(img_128_path)
img_64_path = celeba_64_dir / path.name
img.resize((64, 64), resample=resample).save(img_64_path)
return None
def preprocess_chairs(path):
crop = transforms.CenterCrop((450, 450))
resample = Image.LANCZOS
img = Image.open(path)
img = crop(img)
img_64_path = chairs_64_dir / path.parts[-3] / path.name
img_64_path.parent.mkdir(parents=True, exist_ok=True)
img.resize((64, 64), resample=resample).save(img_64_path)
return None
def preprocess_cars(meta):
resample = Image.LANCZOS
path, (x1, y1, x2, y2) = meta
img = Image.open(path).crop((x1, y1, x2, y2))
if min(img.size) < 64:
return
img_64_path = cars_64_dir / path.parts[-2] / path.parts[-1]
img_64_path.parent.mkdir(parents=True, exist_ok=True)
img.resize((64, 64), resample=resample).save(img_64_path)
return None
if __name__ == '__main__':
dataset = sys.argv[1]
dataroot = Path('data')
if dataset == 'celeba':
preprocess = preprocess_celeba
celeba_64_dir = dataroot / 'CelebA_64' / 'images'
celeba_64_dir.mkdir(parents=True, exist_ok=True)
celeba_128_dir = dataroot / 'CelebA_128' / 'images'
celeba_128_dir.mkdir(parents=True, exist_ok=True)
celeba_256_dir = dataroot / 'CelebA_256' / 'images'
celeba_256_dir.mkdir(parents=True, exist_ok=True)
os.system('unzip -q %s -d %s' % ((dataroot / 'img_align_celeba.zip'), dataroot))
paths = list((dataroot / 'img_align_celeba').glob('*.jpg'))
elif dataset == 'celeba-hq':
preprocess = preprocess_celeba
os.system('unzip -q %s -d %s' % ((dataroot / 'data1024x1024.zip'), (dataroot / 'CelebA-HQ')))
paths = []
elif dataset == 'chairs':
preprocess = preprocess_chairs
chairs_64_dir = dataroot / 'Chairs_64'
os.system('tar -xf %s -C %s' % ((dataroot / 'rendered_chairs.tar'), dataroot))
paths = list((dataroot / 'rendered_chairs').glob('**/*.png'))
elif dataset == 'cars':
preprocess = preprocess_cars
cars_64_dir = dataroot / 'Cars_64'
cars_64_dir.mkdir(parents=True, exist_ok=True)
os.system('tar -xf %s -C %s' % ((dataroot / 'cars_train.tgz'), dataroot))
os.system('tar -xf %s -C %s' % ((dataroot / 'cars_test.tgz'), dataroot))
os.system('tar -xf %s -C %s' % ((dataroot / 'car_devkit.tgz'), dataroot))
from scipy import io
train_annos = io.loadmat(dataroot / 'devkit' / 'cars_train_annos.mat')
test_annos = io.loadmat(dataroot / 'devkit' / 'cars_test_annos.mat')
paths = []
for anno in train_annos['annotations'][0]:
path = dataroot / 'cars_train' / anno[-1][0]
x1, x2, y1, y2 = anno[0][0][0], anno[1][0][0], anno[2][0][0], anno[3][0][0]
paths.append([path, (x1, x2, y1, y2)])
for anno in test_annos['annotations'][0]:
path = dataroot / 'cars_test' / anno[-1][0]
x1, x2, y1, y2 = anno[0][0][0], anno[1][0][0], anno[2][0][0], anno[3][0][0]
paths.append([path, (x1, x2, y1, y2)])
from multiprocessing import Pool
with Pool(16) as pool:
pool.map(preprocess, paths)
```
#### File: gan_training/models/resnet.py
```python
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
import torch.utils.data
import torch.utils.data.distributed
import numpy as np
class View(nn.Module):
def __init__(self, size):
super(View, self).__init__()
self.size = size
def forward(self, tensor):
return tensor.view(self.size)
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, mode='channel', eps=1e-5):
assert mode in ['channel', 'spatial']
super(AdaptiveInstanceNorm, self).__init__()
self.mode = mode
self.eps = eps
self.normalizer = self._get_normalizer(mode)
self.mean = None
self.std = None
def forward(self, x):
x_normalised = self.normalizer(x)
out = (x_normalised).mul(self.std).add(self.mean)
return out
def spatial_normalization(self, x):
assert x.ndimension() == 4
x_mean = x.mean(1, keepdim=True)
x_var = x.var(1, unbiased=False, keepdim=True)
x_normalised = (x-x_mean).div((x_var+self.eps).sqrt())
return x_normalised
def channel_normalization(self, x):
assert x.ndimension() == 4
x_normalised = F.instance_norm(x)
return x_normalised
def update(self, mean, std):
self.mean = mean
self.std = std
def _get_normalizer(self, mode):
return self.channel_normalization if mode == 'channel' else self.spatial_normalization
class SubMapper(nn.Module):
def __init__(self, in_channels, out_channels, adain):
super(SubMapper, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.adain = adain
self.mapping = nn.Sequential(
nn.Conv2d(in_channels, out_channels*2, 1),
)
def forward(self, w_base):
w = self.mapping(w_base)
mean = w[:, :self.out_channels]
std = w[:, self.out_channels:]
self.adain.update(mean, std)
return w_base
class Mapper(nn.Module):
def __init__(self, submappers, z2_dim, hidden_dim=256):
super(Mapper, self).__init__()
self.z2_dim = z2_dim
self.hidden_dim = hidden_dim
self.base_mapping = nn.Sequential(
nn.Linear(z2_dim, hidden_dim),
nn.LeakyReLU(0.2, True),
nn.Linear(hidden_dim, hidden_dim),
nn.LeakyReLU(0.2, True),
nn.Linear(hidden_dim, hidden_dim),
nn.LeakyReLU(0.2, True),
nn.Linear(hidden_dim, hidden_dim),
nn.LeakyReLU(0.2, True),
View((-1, hidden_dim, 1, 1))
)
self.submappers = nn.Sequential(*submappers)
def forward(self, z2):
base_w = self.base_mapping(z2)
for submapper in self.submappers:
submapper(base_w)
return None
class GeneratorAdaIN(nn.Module):
def __init__(self, z_dim, nlabels, size, embed_size=256, nfilter=64, nfilter_max=512, **kwargs):
super().__init__()
s0 = self.s0 = 4
nf = self.nf = nfilter
nf_max = self.nf_max = nfilter_max
self.z_dim = z_dim
# Submodules
nlayers = int(np.log2(size / s0))
map_base_dim = min(nf * 2**(nlayers-0), nf_max)
self.nf0 = min(nf_max, nf * 2**nlayers)
self.const_base = nn.Parameter(torch.ones(1, map_base_dim, 4, 4))
self.const_bias = nn.Parameter(torch.ones(1, map_base_dim, 1, 1))
self.embedding = nn.Embedding(nlabels, embed_size)
self.fc = nn.Linear(z_dim + embed_size, self.nf0*s0*s0)
blocks = []
submappers = []
for i in range(nlayers):
nf0 = min(nf * 2**(nlayers-i), nf_max)
nf1 = min(nf * 2**(nlayers-i-1), nf_max)
adain = AdaptiveInstanceNorm()
submappers += [SubMapper(map_base_dim, nf0, adain)]
blocks += [
adain,
ResnetBlock(nf0, nf1),
nn.Upsample(scale_factor=2)
]
adain = AdaptiveInstanceNorm()
submappers += [SubMapper(map_base_dim, nf, adain)]
blocks += [
adain,
ResnetBlock(nf, nf),
]
self.mapping = Mapper(submappers, z_dim+nlabels, hidden_dim=map_base_dim)
self.resnet = nn.Sequential(*blocks)
self.conv_img = nn.Conv2d(nf, 3, 3, padding=1)
def forward(self, z, y):
assert(z.size(0) == y.size(0))
batch_size = z.size(0)
if y.dtype is torch.int64:
yembed = self.embedding(y)
else:
yembed = y
yembed = yembed / torch.norm(yembed, p=2, dim=1, keepdim=True)
yz = torch.cat([z, yembed], dim=1)
self.mapping(yz)
input = self.const_base + self.const_bias
out = self.resnet(input)
out = self.conv_img(actvn(out))
out = F.tanh(out)
return out
class Generator(nn.Module):
def __init__(self, z_dim, nlabels, size, embed_size=256, nfilter=64, nfilter_max=512, **kwargs):
super().__init__()
s0 = self.s0 = 4
nf = self.nf = nfilter
nf_max = self.nf_max = nfilter_max
self.z_dim = z_dim
# Submodules
nlayers = int(np.log2(size / s0))
self.nf0 = min(nf_max, nf * 2**nlayers)
self.embedding = nn.Embedding(nlabels, embed_size)
self.fc = nn.Linear(z_dim + embed_size, self.nf0*s0*s0)
blocks = []
for i in range(nlayers):
nf0 = min(nf * 2**(nlayers-i), nf_max)
nf1 = min(nf * 2**(nlayers-i-1), nf_max)
blocks += [
ResnetBlock(nf0, nf1),
nn.Upsample(scale_factor=2)
]
blocks += [
ResnetBlock(nf, nf),
]
self.resnet = nn.Sequential(*blocks)
self.conv_img = nn.Conv2d(nf, 3, 3, padding=1)
def forward(self, z, y):
assert(z.size(0) == y.size(0))
batch_size = z.size(0)
if y.dtype is torch.int64:
yembed = self.embedding(y)
else:
yembed = y
yembed = yembed / torch.norm(yembed, p=2, dim=1, keepdim=True)
yz = torch.cat([z, yembed], dim=1)
out = self.fc(yz)
out = out.view(batch_size, self.nf0, self.s0, self.s0)
out = self.resnet(out)
out = self.conv_img(actvn(out))
out = F.tanh(out)
return out
class Discriminator(nn.Module):
def __init__(self, z_dim, nlabels, size, embed_size=256, nfilter=64, nfilter_max=1024):
super().__init__()
self.embed_size = embed_size
s0 = self.s0 = 4
nf = self.nf = nfilter
nf_max = self.nf_max = nfilter_max
# Submodules
nlayers = int(np.log2(size / s0))
self.nf0 = min(nf_max, nf * 2**nlayers)
blocks = [
ResnetBlock(nf, nf)
]
for i in range(nlayers):
nf0 = min(nf * 2**i, nf_max)
nf1 = min(nf * 2**(i+1), nf_max)
blocks += [
nn.AvgPool2d(3, stride=2, padding=1),
ResnetBlock(nf0, nf1),
]
self.conv_img = nn.Conv2d(3, 1*nf, 3, padding=1)
self.resnet = nn.Sequential(*blocks)
self.fc = nn.Linear(self.nf0*s0*s0, nlabels)
self.hiddens = dict()
def named_hook(name):
def hook(module, input, output):
self.hiddens[name] = output.view(output.size(0), -1)
return None
return hook
# self.resnet.register_forward_hook(named_hook('ds'))
def forward(self, x, y, zs=None):
assert(x.size(0) == y.size(0))
batch_size = x.size(0)
out = self.conv_img(x)
out = self.resnet(out)
out = out.view(batch_size, self.nf0*self.s0*self.s0)
out_temp = out
# ds_ratio = None
# if zs is not None:
# bs_half = int(batch_size//2)
# #out1, out2 = out[:bs_half], out[bs_half:bs_half+bs_half]
# out1, out2 = self.hiddens['ds'][:bs_half], self.hiddens['ds'][bs_half:bs_half+bs_half]
# z1, z2 = zs[:bs_half], zs[bs_half:bs_half+bs_half]
# ds_ratio = (out1-out2).view(bs_half, -1).norm(p=2, dim=1).div((z1-z2).norm(p=2, dim=1)).mean()
out = self.fc(actvn(out))
index = Variable(torch.LongTensor(range(out.size(0))))
if y.is_cuda:
index = index.cuda()
out = out[index, y]
#return out, ds_ratio
return out, out_temp
class ResnetBlock(nn.Module):
def __init__(self, fin, fout, fhidden=None, is_bias=True):
super().__init__()
# Attributes
self.is_bias = is_bias
self.learned_shortcut = (fin != fout)
self.fin = fin
self.fout = fout
if fhidden is None:
self.fhidden = min(fin, fout)
else:
self.fhidden = fhidden
# Submodules
self.conv_0 = nn.Conv2d(self.fin, self.fhidden, 3, stride=1, padding=1)
self.conv_1 = nn.Conv2d(self.fhidden, self.fout, 3, stride=1, padding=1, bias=is_bias)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(self.fin, self.fout, 1, stride=1, padding=0, bias=False)
def forward(self, x):
x_s = self._shortcut(x)
dx = self.conv_0(actvn(x))
dx = self.conv_1(actvn(dx))
out = x_s + 0.1*dx
return out
def _shortcut(self, x):
if self.learned_shortcut:
x_s = self.conv_s(x)
else:
x_s = x
return x_s
def actvn(x):
out = F.leaky_relu(x, 2e-1)
return out
``` |
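The `SubMapper`/`AdaptiveInstanceNorm` pair in `resnet.py` above follows an update-then-forward contract: the mapper pushes per-sample `mean`/`std` tensors into each AdaIN layer, and the following forward pass re-styles the normalised activations with them. A tiny sketch of that contract in isolation (import path and shapes are assumptions for illustration):
```python
import torch
from gan_training.models.resnet import AdaptiveInstanceNorm  # assumed import path
adain = AdaptiveInstanceNorm()        # channel-wise mode by default
x = torch.randn(2, 8, 4, 4)           # (batch, channels, H, W)
mean = torch.randn(2, 8, 1, 1)        # broadcast over the spatial dims
std = torch.rand(2, 8, 1, 1) + 0.5
adain.update(mean, std)               # must happen before calling forward()
out = adain(x)                        # instance_norm(x) * std + mean
print(out.shape)                      # torch.Size([2, 8, 4, 4])
```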
{
"source": "1kpp/python_trainnig",
"score": 3
} |
#### File: python_trainnig/test/test_add_contact_to_group.py
```python
from model.contact import Contact
from model.group import Group
import random
def test_add_contact_to_group(app, db, check_ui):
# checks whether there are contacts available. If not - create one
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="Name for deletion"))
# check whether there are groups available. If not - create one
if len(db.get_group_list()) == 0:
app.group.create(Group(name="azazazaz"))
# check whether there are free contacts (not part of any group)
if len(db.get_contacts_not_in_any_of_groups()) == 0:
app.contact.create(Contact(firstname="Contact not in groups"))
# check whether there are free groups (do not have any contacts inside)
if len(db.get_groups_without_contacts()) == 0:
app.group.create(Group(name="Group without contacts"))
# choose random contact to add
random_contact = random.choice(db.get_contacts_not_in_any_of_groups())
# choose random group for contact addition
random_group = random.choice(db.get_groups_without_contacts())
# add contact to group
app.contact.add_contact_to_group(random_contact.id, random_group.id)
# assert that random_contact is in list of contacts of random_group
assert random_contact in db.get_contact_in_group(random_group)
``` |
{
"source": "1kpp/stepik_course",
"score": 4
} |
#### File: 1kpp/stepik_course/module3_lesson6.py
```python
from selenium import webdriver
import math
#define a function which calculates and returns the result in accordance with the "X"
def calc(x):
return str(math.log(abs(12*math.sin(int(x)))))
driver = webdriver.Chrome()
#open the browser
link = "http://SunInJuly.github.io/execute_script.html"
driver.get(link)
#find element which contains "X" and calculates the function
x_element = driver.find_element_by_id("input_value")
x = x_element.text
y = calc(x)
#enter the result of calculations into the text area
driver.find_element_by_id("answer").send_keys(y)
#scroll down the page to make other elements visible
submit = driver.find_element_by_css_selector("[class='btn btn-primary']")
driver.execute_script("return arguments[0].scrollIntoView(true);", submit)
submit.click()
#check radiobutton and checkbox
option1 = driver.find_element_by_css_selector("[for='robotCheckbox']")
option1.click()
option2 = driver.find_element_by_css_selector("[for='robotsRule']")
option2.click()
#click submit
submit.click()
``` |
{
"source": "1KVueltasAlCampo/VIP-KNN",
"score": 3
} |
#### File: src/visualization/visualizer.py
```python
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
#Initial state. Base dataset.
##def __init__ visualizer(self):
##Still needs to classify it.
def add_dot(sepal_width, sepal_length, color):
df = px.data.iris()
newDyc = {"sepal_width": sepal_width, "sepal_length": sepal_length, "species": color}
df = df.append(newDyc, ignore_index=True)
newFig = px.scatter(df, x="sepal_width", y="sepal_length", color="species")
newFig.show()
# KNN.fit(X, Y)
#KNN.askNewData()
if __name__ == "__main__":
df = px.data.iris()
speciesDyc = {"setosa": 0, "versicolor": 1, "virginica": 2}
X = df["sepal_width", "sepal_length"].values
Y = df["species"].apply(speciesDyc.get).values
print(np.isnan(Y).sum() == 0)
df = df[["sepal_width", "sepal_length", "species"]]
fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species")
    fig.show()
``` |
{
"source": "1kyu/qtile",
"score": 2
} |
#### File: libqtile/backend/__init__.py
```python
import importlib
from libqtile.utils import QtileError
CORES = [
'wayland',
'x11',
]
def get_core(backend, *args):
if backend not in CORES:
raise QtileError(f"Backend {backend} does not exist")
return importlib.import_module(f"libqtile.backend.{backend}.core").Core(*args)
```
#### File: libqtile/widget/keyboardlayout.py
```python
from __future__ import annotations
import re
from abc import ABCMeta, abstractmethod
from subprocess import CalledProcessError, check_output
from typing import TYPE_CHECKING
from libqtile.confreader import ConfigError
from libqtile.log_utils import logger
from libqtile.widget import base
if TYPE_CHECKING:
from typing import Optional
from libqtile.core.manager import Qtile
class _BaseLayoutBackend(metaclass=ABCMeta):
def __init__(self, qtile: Qtile):
"""
This handles getting and setter the keyboard layout with the appropriate
backend.
"""
@abstractmethod
def get_keyboard(self) -> str:
"""
Return the currently used keyboard layout as a string
Examples: "us", "us dvorak". In case of error returns "unknown".
"""
def set_keyboard(self, layout: str, options: Optional[str]) -> None:
"""
Set the keyboard layout with specified options.
"""
class _X11LayoutBackend(_BaseLayoutBackend):
kb_layout_regex = re.compile(r'layout:\s+(?P<layout>\w+)')
kb_variant_regex = re.compile(r'variant:\s+(?P<variant>\w+)')
def get_keyboard(self) -> str:
try:
command = 'setxkbmap -verbose 10 -query'
setxkbmap_output = check_output(command.split(' ')).decode()
except CalledProcessError as e:
logger.error('Can not get the keyboard layout: {0}'.format(e))
return "unknown"
except OSError as e:
logger.error('Please, check that xset is available: {0}'.format(e))
return "unknown"
match_layout = self.kb_layout_regex.search(setxkbmap_output)
if match_layout is None:
return 'ERR'
keyboard = match_layout.group('layout')
match_variant = self.kb_variant_regex.search(setxkbmap_output)
if match_variant:
keyboard += " " + match_variant.group('variant')
return keyboard
def set_keyboard(self, layout: str, options: Optional[str]) -> None:
command = ['setxkbmap']
command.extend(layout.split(" "))
if options:
command.extend(['-option', options])
try:
check_output(command)
except CalledProcessError as e:
logger.error('Can not change the keyboard layout: {0}'.format(e))
except OSError as e:
logger.error('Please, check that setxkbmap is available: {0}'.format(e))
class _WaylandLayoutBackend(_BaseLayoutBackend):
def __init__(self, qtile: Qtile) -> None:
self.set_keymap = qtile.core.cmd_set_keymap # type: ignore
self._layout: str = ""
def get_keyboard(self) -> str:
return self._layout
def set_keyboard(self, layout: str, options: Optional[str]) -> None:
maybe_variant: Optional[str] = None
if " " in layout:
layout_name, maybe_variant = layout.split(" ", maxsplit=1)
else:
layout_name = layout
self.set_keymap(layout_name, options, maybe_variant)
self._layout = layout
layout_backends = {
'x11': _X11LayoutBackend,
'wayland': _WaylandLayoutBackend,
}
class KeyboardLayout(base.InLoopPollText):
"""Widget for changing and displaying the current keyboard layout
To use this widget effectively you need to specify keyboard layouts you want to use (using "configured_keyboards")
and bind function "next_keyboard" to specific keys in order to change layouts.
For example:
Key([mod], "space", lazy.widget["keyboardlayout"].next_keyboard(), desc="Next keyboard layout."),
When running Qtile with the X11 backend, this widget requires setxkbmap to be available.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("update_interval", 1, "Update time in seconds."),
("configured_keyboards", ["us"], "A list of predefined keyboard layouts "
"represented as strings. For example: "
"['us', 'us colemak', 'es', 'fr']."),
("display_map", {}, "Custom display of layout. Key should be in format "
"'layout variant'. For example: "
"{'us': 'us', 'lt sgs': 'sgs', 'ru phonetic': 'ru'}"),
("option", None, "string of setxkbmap option. Ex., 'compose:menu,grp_led:scroll'"),
]
def __init__(self, **config):
base.InLoopPollText.__init__(self, **config)
self.add_defaults(KeyboardLayout.defaults)
self.add_callbacks({'Button1': self.next_keyboard})
def _configure(self, qtile, bar):
base.InLoopPollText._configure(self, qtile, bar)
if qtile.core.name not in layout_backends:
raise ConfigError(
"KeyboardLayout does not support backend: " + qtile.core.name
)
self.backend = layout_backends[qtile.core.name](qtile)
self.backend.set_keyboard(self.configured_keyboards[0], self.option)
def next_keyboard(self):
"""Set the next layout in the list of configured keyboard layouts as
new current layout in use
If the current keyboard layout is not in the list, it will set as new
layout the first one in the list.
"""
current_keyboard = self.backend.get_keyboard()
if current_keyboard in self.configured_keyboards:
# iterate the list circularly
next_keyboard = self.configured_keyboards[
(self.configured_keyboards.index(current_keyboard) + 1) %
len(self.configured_keyboards)]
else:
next_keyboard = self.configured_keyboards[0]
self.backend.set_keyboard(next_keyboard, self.option)
self.tick()
def poll(self):
keyboard = self.backend.get_keyboard()
if keyboard in self.display_map.keys():
return self.display_map[keyboard]
return keyboard.upper()
def cmd_next_keyboard(self):
"""Select next keyboard layout"""
self.next_keyboard()
```
#### File: qtile/test/test_popup.py
```python
import textwrap
def test_popup_focus(manager):
manager.test_window("one")
start_wins = len(manager.backend.get_all_windows())
success, msg = manager.c.eval(textwrap.dedent("""
from libqtile.popup import Popup
popup = Popup(self,
x=0,
y=0,
width=self.current_screen.width,
height=self.current_screen.height,
)
popup.place()
popup.unhide()
"""))
assert success, msg
end_wins = len(manager.backend.get_all_windows())
assert end_wins == start_wins + 1
assert manager.c.group.info()['focus'] == 'one'
assert manager.c.group.info()['windows'] == ['one']
assert len(manager.c.windows()) == 1
```
#### File: test/widgets/test_textbox.py
```python
import libqtile.bar
import libqtile.config
import libqtile.confreader
import libqtile.layout
from libqtile import widget
def test_command_interface(manager_nospawn, minimal_conf_noscreen):
# Set a short interval and start so widget exits immediately
textbox = widget.TextBox(text="Testing")
config = minimal_conf_noscreen
config.screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar([textbox], 10)
)
]
manager_nospawn.start(config)
bar = manager_nospawn.c.bar["top"]
w = bar.info()["widgets"][0]
assert w["text"] == "Testing"
manager_nospawn.c.widget["textbox"].update("Updated")
w = bar.info()["widgets"][0]
assert w["text"] == "Updated"
assert manager_nospawn.c.widget["textbox"].get() == "Updated"
``` |
{
"source": "1lann/home-assistant",
"score": 2
} |
#### File: homeassistant/components/feedreader.py
```python
from datetime import datetime
from logging import getLogger
import voluptuous as vol
from homeassistant.helpers.event import track_utc_time_change
REQUIREMENTS = ['feedparser==5.2.1']
_LOGGER = getLogger(__name__)
DOMAIN = "feedreader"
EVENT_FEEDREADER = "feedreader"
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema({
DOMAIN: {
'urls': [vol.Url()],
}
}, extra=vol.ALLOW_EXTRA)
# pylint: disable=too-few-public-methods
class FeedManager(object):
"""Abstraction over feedparser module."""
def __init__(self, url, hass):
"""Initialize the FeedManager object, poll every hour."""
self._url = url
self._feed = None
self._hass = hass
# Initialize last entry timestamp as epoch time
self._last_entry_timestamp = datetime.utcfromtimestamp(0).timetuple()
_LOGGER.debug('Loading feed %s', self._url)
self._update()
track_utc_time_change(hass, lambda now: self._update(),
minute=0, second=0)
def _log_no_entries(self):
"""Send no entries log at debug level."""
_LOGGER.debug('No new entries in feed %s', self._url)
def _update(self):
"""Update the feed and publish new entries in the event bus."""
import feedparser
_LOGGER.info('Fetching new data from feed %s', self._url)
self._feed = feedparser.parse(self._url,
etag=None if not self._feed
else self._feed.get('etag'),
modified=None if not self._feed
else self._feed.get('modified'))
if not self._feed:
_LOGGER.error('Error fetching feed data from %s', self._url)
else:
if self._feed.bozo != 0:
_LOGGER.error('Error parsing feed %s', self._url)
# Using etag and modified, if there's no new data available,
# the entries list will be empty
elif len(self._feed.entries) > 0:
_LOGGER.debug('Entries available in feed %s', self._url)
self._publish_new_entries()
self._last_entry_timestamp = \
self._feed.entries[0].published_parsed
else:
self._log_no_entries()
def _publish_new_entries(self):
"""Publish new entries to the event bus."""
new_entries = False
for entry in self._feed.entries:
# Consider only entries newer then the latest parsed one
if entry.published_parsed > self._last_entry_timestamp:
new_entries = True
entry.update({'feed_url': self._url})
self._hass.bus.fire(EVENT_FEEDREADER, entry)
if not new_entries:
self._log_no_entries()
def setup(hass, config):
"""Setup the feedreader component."""
urls = config.get(DOMAIN)['urls']
feeds = [FeedManager(url, hass) for url in urls]
return len(feeds) > 0
```
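For reference, the schema above accepts a mapping with a list of feed URLs under the `feedreader` key; a hedged sketch of validating such a config dict directly (normally Home Assistant builds it from configuration.yaml):
```python
# Assumes CONFIG_SCHEMA from the feedreader module above is in scope.
example = {
    'feedreader': {
        'urls': ['https://example.com/feed.xml'],
    }
}
validated = CONFIG_SCHEMA(example)   # a malformed URL raises voluptuous's Invalid
print(validated['feedreader']['urls'])
```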
#### File: components/sensor/wink.py
```python
import logging
from homeassistant.const import (CONF_ACCESS_TOKEN, STATE_CLOSED,
STATE_OPEN, TEMP_CELSIUS)
from homeassistant.helpers.entity import Entity
REQUIREMENTS = ['python-wink==0.7.4']
SENSOR_TYPES = ['temperature', 'humidity']
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Wink platform."""
import pywink
if discovery_info is None:
token = config.get(CONF_ACCESS_TOKEN)
if token is None:
logging.getLogger(__name__).error(
"Missing wink access_token. "
"Get one at https://winkbearertoken.appspot.com/")
return
pywink.set_bearer_token(token)
for sensor in pywink.get_sensors():
if sensor.capability() in SENSOR_TYPES:
add_devices([WinkSensorDevice(sensor)])
add_devices(WinkEggMinder(eggtray) for eggtray in pywink.get_eggtrays())
class WinkSensorDevice(Entity):
"""Representation of a Wink sensor."""
def __init__(self, wink):
"""Initialize the sensor."""
self.wink = wink
self.capability = self.wink.capability()
if self.wink.UNIT == "°":
self._unit_of_measurement = TEMP_CELSIUS
else:
self._unit_of_measurement = self.wink.UNIT
@property
def state(self):
"""Return the state."""
if self.capability == "humidity":
return self.wink.humidity_percentage()
elif self.capability == "temperature":
return self.wink.temperature_float()
else:
return STATE_OPEN if self.is_open else STATE_CLOSED
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def unique_id(self):
"""Return the ID of this wink sensor."""
return "{}.{}".format(self.__class__, self.wink.device_id())
@property
def name(self):
"""Return the name of the sensor if any."""
return self.wink.name()
@property
def available(self):
"""True if connection == True."""
return self.wink.available
def update(self):
"""Update state of the sensor."""
self.wink.update_state()
@property
def is_open(self):
"""Return true if door is open."""
return self.wink.state()
class WinkEggMinder(Entity):
"""Representation of a Wink Egg Minder."""
def __init__(self, wink):
"""Initialize the sensor."""
self.wink = wink
@property
def state(self):
"""Return the state."""
return self.wink.state()
@property
def unique_id(self):
"""Return the id of this wink Egg Minder."""
return "{}.{}".format(self.__class__, self.wink.device_id())
@property
def name(self):
"""Return the name of the Egg Minder if any."""
return self.wink.name()
def update(self):
"""Update state of the Egg Minder."""
self.wink.update_state()
``` |
{
"source": "1lca/stix2arango",
"score": 3
} |
#### File: stix2arango/stix2arango/postgresql.py
```python
import ipaddress
import copy
from typing import Dict
from stix2arango.exceptions import InvalidObjectForOptimizer
from stix2arango.utils import deep_dict_update
import psycopg2
from psycopg2.errors import DuplicateTable
import uuid
type_map = {
'str' : 'TEXT',
'int' : 'INT'
}
operator_map = {
'>' : '>',
'<' : '<',
'=' : '=',
'!=' : '!=',
'<=' : '<=',
'>=' : '>=',
'like' : 'LIKE',
}
def convert_type(type_, value_):
"""Map between python type/postgresql type
Args:
type_ (str): python field type
value_ (str): python field value
Raises:
RuntimeError: When a type can't be converted
Returns:
str: the postgresql associed type
"""
if type_ in type_map:
return type_map[type_]
raise RuntimeError("%s type not found" % (type_))
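# Illustrative usage (not part of the original source):
#   convert_type('str', 'some text')  ->  'TEXT'
#   convert_type('int', 42)           ->  'INT'
#   convert_type('float', 1.0)        ->  raises RuntimeError('float type not found')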
class PGResult(dict):
def __init__(
self,
arango_conn,
arango_id=None,
field0_value=None,
optimizer=None
):
self.arango_conn = arango_conn
self.arango_id = arango_id
self.field0_value = field0_value
self.optimizer = optimizer
super().__init__()
def __call__(self):
if self.field0_value:
pass
class PostgresOptimizer:
postgres_conn = None
count = 0
db_name = None
db_host = None
db_user = None
db_pass = None
db_port = None
def __init__(self, field):
self.uuid = str(uuid.uuid4()).replace('-','_')
self.table_name = None
self.field = field
self.table_created = False
if not(PostgresOptimizer.postgres_conn):
raise RuntimeError('PostgresOptimizer.postgres_conn is not set')
def insert_stix_obj(self, stix_object, arango_id, feed):
if not self.table_created:
self.__create_table(feed, stix_object)
arango_id = int(arango_id.split('/')[-1])
if self.field != 'ipv4-addr:x_ip':
value = self.__extract_field_value(self.field, stix_object)
elif stix_object['type'] == 'ipv4-addr':
value = stix_object['value']
else:
raise InvalidObjectForOptimizer(stix_object['type'])
sql = "INSERT INTO " + self.table_name + " values (%s, %s, %s);"
with PostgresOptimizer.postgres_conn.cursor() as cursor:
cursor.execute(sql, [value, arango_id, stix_object['id']])
if self.count % 1000 == 0:
PostgresOptimizer.postgres_conn.commit()
self.count += 1
return stix_object
def craft_obj_from_request(self, stix_id, field0):
type = self.field.split(':')[0]
if self.field == 'ipv4-addr:x_ip':
obj = {'id' : stix_id, 'type' : type, 'value' : field0}
return obj
path = self.field.split(':')[1:-1]
value_name = self.field.split(':')[-1]
obj = {'id' : stix_id, 'type' : type}
current_obj = obj
for step in path:
current_obj[step] = {}
current_obj = current_obj[step]
current_obj[value_name] = field0
return obj
def present_results(self, results):
dict_results = {}
for arango_id, stix_id, field0 in results:
if not str(arango_id) in dict_results:
dict_results[str(arango_id)] = []
dict_results[str(arango_id)] += [self.craft_obj_from_request(stix_id, field0)]
return dict_results
def query(self, operator, value, feed):
if value[0] == '"' and value[-1] == '"':
value = "'" + value[1:-1] + "'"
self.table_name = feed.storage_paradigm.get_collection_name(feed) + self.uuid
if self.field == 'ipv4-addr:x_ip':
middle_sql = 'field0 >> ' + value
middle_sql += ' OR field0 = ' + value
else:
middle_sql = 'field0 ' + operator_map[operator] + ' ' + value
sql = 'select arango_id, stix_id, field0 from ' + self.table_name + ' where ' + middle_sql + ';'
with PostgresOptimizer.postgres_conn.cursor() as cursor:
cursor.execute(sql)
results = cursor.fetchall()
return self.present_results(results)
def query_from_arango_results(self, col_name, results, arango_conn):
# ! BUG : 2 times the same results
self.table_name = col_name + self.uuid
pg_results = []
for r in results:
try:
r = r.getStore()
except:
pass
if r['type'] == self.field.split(':')[0] or\
(self.field == 'ipv4-addr:x_ip' and r['type'] == 'ipv4-addr'):
sql = 'select arango_id, stix_id, field0 from ' + self.table_name + ' where arango_id = \'' + r['_key'] + '\''
cursor = PostgresOptimizer.postgres_conn.cursor()
cursor.execute(sql)
pg_results += cursor.fetchall()
pg_results = self.present_results(pg_results)
cross = []
for r in results:
if r['_key'] in pg_results:
for x in pg_results[r['_key']]:
r = copy.deepcopy(r)
cross.append(deep_dict_update(r, x))
return cross
def crosses_results_with_arango(self, results, arango_conn, col_name) -> list:
aql2 = 'for el in %s filter el._key in %s return el' % (col_name, str(list(results.keys())))
aql_results = [result.getStore() for result in
arango_conn.AQLQuery(aql2, raw_results=True)]
matched_results = []
for m in aql_results:
obj = copy.deepcopy(m)
for pg_obj in results[m['_key']]:
deep_dict_update(obj, pg_obj)
matched_results.append(obj)
return matched_results
def __del__(self):
if self.table_created and PostgresOptimizer.postgres_conn:
idx_name = 'idx_' + self.uuid
sql = 'CREATE INDEX ' + idx_name + ' ON ' + self.table_name + '(field0)'
try:
with PostgresOptimizer.postgres_conn.cursor() as cursor:
cursor.execute(sql)
except:
pass
if PostgresOptimizer.postgres_conn:
PostgresOptimizer.postgres_conn.commit()
def __dict__(self):
return {
'class': str(self.__class__.__name__).lower(),
'field' : self.field,
'uuid' : self.uuid
}
def __extract_field_type(self, field, stix_object):
object = copy.deepcopy(stix_object)
if field.split(':')[0] == object['type']:
for f in field.split(':')[1:]:
try:
object = object[f]
except (TypeError, KeyError):
raise InvalidObjectForOptimizer(stix_object['type'])
return type(object).__name__
else:
raise InvalidObjectForOptimizer(stix_object['type'])
def __extract_field_value(self, field, stix_object):
object = copy.deepcopy(stix_object)
if field.split(':')[0] == object['type']:
for f in field.split(':')[1:]:
try:
object = object[f]
except (TypeError, KeyError):
raise InvalidObjectForOptimizer(stix_object['type'])
return object
else:
raise InvalidObjectForOptimizer(stix_object['type'])
def __create_table(self, feed, stix_object):
try:
if self.field != 'ipv4-addr:x_ip':
type_ = self.__extract_field_type(self.field, stix_object)
value = self.__extract_field_value(self.field, stix_object)
type_ = convert_type(type_, value)
else:
type_ = 'inet'
content = 'field0 ' + type_ + ', arango_id int, stix_id text'
self.table_name = feed.storage_paradigm.get_collection_name(feed) + self.uuid
cursor = PostgresOptimizer.postgres_conn.cursor()
base_query = 'create table ' + self.table_name + ' (%s);'
query = base_query % (content)
cursor.execute(query)
cursor.close()
PostgresOptimizer.postgres_conn.commit()
except DuplicateTable:
pass
self.table_created = True
def list_all_table(self):
s = "SELECT"
s += " table_schema"
s += ", table_name"
s += " FROM information_schema.tables"
s += " WHERE"
s += " ("
s += " table_schema = 'public'"
s += " )"
s += " ORDER BY table_schema, table_name;"
with PostgresOptimizer.postgres_conn.cursor() as cursor:
cursor.execute(s)
results = cursor.fetchall()
return [list(r)[1] for r in results]
def drop_table(self, feed_name) -> bool:
try:
for table_name in self.list_all_table():
if table_name.startswith(feed_name):
sql = 'drop table ' + table_name
with PostgresOptimizer.postgres_conn.cursor() as cursor :
cursor.execute(sql)
PostgresOptimizer.postgres_conn.commit()
self.table_created = False
return True
except Exception:
return False
def delete_fields_in_object(self, object):
object = copy.deepcopy(object)
object_type = self.field.split(':')[0]
field_path = self.field.split(':')[1:-1]
last_field = self.field.split(':')[-1]
if object['type'] == object_type:
dict_to_remove = object
for f in field_path:
if f in dict_to_remove:
dict_to_remove = dict_to_remove[f]
else:
break
if last_field in dict_to_remove:
del dict_to_remove[last_field]
if self.field == 'ipv4-addr:x_ip':
if 'value' in object:
del object['value']
if 'id' in object:
del object['id']
return object
@staticmethod
def connect_db():
auth = "dbname='%s' user='%s' host='%s' password='%s' port='%s'"
auth = auth % (
PostgresOptimizer.db_name,
PostgresOptimizer.db_user,
PostgresOptimizer.db_host,
PostgresOptimizer.db_pass,
PostgresOptimizer.db_port
)
PostgresOptimizer.postgres_conn = psycopg2.connect(auth)
```
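For orientation, a minimal sketch of how this optimizer is wired into a feed, assembled from the test suites below; the connection string, feed name and IP value are placeholders and not part of the package:
```python
from datetime import datetime
import psycopg2
from stix2 import IPv4Address
from stix2arango.feed import Feed
from stix2arango.request import Request
from stix2arango.storage import TIME_BASED
from stix2arango.postgresql import PostgresOptimizer
from stix2arango.utils import get_database

PostgresOptimizer.postgres_conn = psycopg2.connect(
    "dbname='stix2arango' user='root' host='localhost' password='...'")  # placeholder credentials
arango_conn = get_database()
feed = Feed(arango_conn, 'demo_feed', tags=['demo'],
            date=datetime.now(), storage_paradigm=TIME_BASED)
feed.optimizers.append(PostgresOptimizer('ipv4-addr:x_ip'))  # this field is stored in PostgreSQL
feed.insert_stix_object_in_arango([IPv4Address(value='198.51.100.7')])
results = Request(arango_conn, datetime.now()).request(
    "[ipv4-addr:x_ip = '198.51.100.7']", tags=['demo'])
```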
#### File: stix2arango/test/test_main.py
```python
import unittest
import sys
sys.path.insert(0, '/app')
sys.path.insert(0, '.')
from stix2 import IPv4Address, AutonomousSystem, Identity
from stix2 import Relationship, Incident, IPv6Address
from pyArango.connection import *
from stix2arango.feed import Feed, vaccum
from stix2arango.request import Request
from stix2arango.storage import GROUPED, GROUPED_BY_MONTH, TIME_BASED, STATIC
from stix2arango.utils import get_database
from stix2arango import stix_modifiers
from datetime import datetime, timedelta
db_conn = get_database()
class GeneralTest(unittest.TestCase):
def setUp(self):
self.autonomous_system = AutonomousSystem(number=1234, name='Google')
self.ipv4 = IPv4Address(value='172.16.17.32', belongs_to_refs=[self.autonomous_system.id])
self.identity = Identity(name='<NAME>', identity_class='individual')
self.relation = Relationship(source_ref=self.identity.id, target_ref=self.ipv4.id, relationship_type='attributed-to')
self.ipv4_net = IPv4Address(value='172.16.31.10/24', belongs_to_refs=[self.autonomous_system.id])
self.ipv6 = IPv6Address(value='2001:0db8:85a3:0000:0000:8a2e:0370:7334', belongs_to_refs=[self.autonomous_system.id])
self.feed = Feed(db_conn, 'timefeed', tags=['time_based'], storage_paradigm=TIME_BASED)
def test_insert(self):
self.feed.insert_stix_object_in_arango([self.ipv4,
self.autonomous_system,
self.identity,
self.relation,
self.ipv4_net,
self.ipv6])
# test with grouped paradigm
def test_grouped(self):
autonomous_system = AutonomousSystem(number=1234, name='Google')
ipv4 = IPv4Address(value='172.16.17.32', belongs_to_refs=[autonomous_system.id])
identity = Identity(name='<NAME>', identity_class='individual')
relation = Relationship(source_ref=identity.id, target_ref=ipv4.id, relationship_type='attributed-to')
feed = Feed(db_conn, 'groupedfeed', tags=['paynoattention', 'grouped'], storage_paradigm=GROUPED)
feed.insert_stix_object_in_arango([ipv4, autonomous_system, identity, relation])
# test with grouped-by-month paradigm
feed = Feed(db_conn, 'grouped_by_month_feed', tags=['paynoattention', 'dogstory'], storage_paradigm=GROUPED_BY_MONTH)
identity = Identity(name='<NAME>', identity_class='individual')
course_of_action = Incident(name='INC 1078', description='My dog barked on neighbors')
relation = Relationship(source_ref=course_of_action.id, target_ref=identity.id, relationship_type='attributed-to')
feed.insert_stix_object_in_arango([identity, course_of_action, relation])
feeds = Feed.get_last_feeds(db_conn, datetime(2022, 12, 12))
def test_request_1(self):
request = Request(db_conn, datetime.now())
results = request.request(" [ipv4-addr:x_ip = '172.16.17.32' ] ",
tags=['time_based'], max_depth=1)
self.assertEqual(len(results), 5)
request = Request(db_conn, datetime.now())
results = request.request("""[ identity:name = '<NAME>']""",
tags=['time_based'])
self.assertEqual(len(results), 3)
def test_request_2(self):
feed = Feed(db_conn, 'patterntestfeed', tags=['patterntestfeed'], storage_paradigm=TIME_BASED, )
ipv4 = IPv4Address(value='192.168.127.12/24')
feed.insert_stix_object_in_arango([ipv4])
request = Request(db_conn, datetime.now())
results = request.request("[ipv4-addr:x_ip='172.16.17.32']",
tags=['patterntestfeed'])
self.assertEqual(len(results), 1)
results = request.request("[ malware:name = 'Adware' ]",
tags=['patterntestfeed'])
self.assertEqual(len(results), 0)
def test_vaccum(self):
feed = Feed(db_conn, 'vaccumentest', tags=['vaccum'], storage_paradigm=TIME_BASED, vaccum_date=datetime.fromtimestamp(10))
ipv4 = IPv4Address(value='192.168.127.12/24')
feed.insert_stix_object_in_arango([ipv4])
vaccum(db_conn)
feeds = Feed.get_last_feeds(db_conn, datetime(2022, 12, 12))
for feed in feeds:
if feed.feed_name == 'vaccumentest':
raise Exception('Vaccum failed')
def test_optimisation_patch(self):
r = '[ipv4-addr:value = "mushroom" OR ipv4-addr:net != "red hot"]'
request = Request(db_conn, datetime.now())
results = request.request(r)
self.assertGreater(len(results), 0)
def test_patch_issue20(self):
feed = Feed(db_conn, 'patch20', tags=['patch20'], storage_paradigm=TIME_BASED)
ipv4 = IPv4Address(value='172.16.17.32')
identity = Identity(name='<NAME>', identity_class='individual')
feed.insert_stix_object_in_arango([ipv4, identity])
autonomous_system = AutonomousSystem(number=1234, name='Google')
ipv4 = IPv4Address(value='172.16.17.32', belongs_to_refs=[autonomous_system.id])
feed.insert_stix_object_in_arango([ipv4, autonomous_system])
feeds = Feed.get_last_feeds(db_conn, datetime.now())
for f in feeds:
if f.feed_name == 'patch20':
self.assertEqual(f.inserted_stix_types,
['ipv4-addr', 'identity', 'autonomous-system']
)
def test_static_storage_issue_21(self):
feed = Feed(db_conn, 'staticfeed', storage_paradigm=STATIC)
ipv4 = IPv4Address(value='172.16.17.32')
feed.insert_stix_object_in_arango([ipv4])
col_name = feed.storage_paradigm.get_collection_name(feed)
self.assertEqual(db_conn[col_name].count(), 1)
feed = Feed(db_conn, 'staticfeed', storage_paradigm=STATIC)
feed.insert_stix_object_in_arango([self.identity, self.autonomous_system])
self.assertEqual(db_conn[col_name].count(), 2)
def test_grouped_search(self):
request = Request(db_conn, datetime.now() - timedelta(days=1000))
r = request.request("[identity:name = '<NAME>']", tags=['grouped'])
self.assertGreater(len(r), 0)
```
#### File: stix2arango/test/test_postgresql.py
```python
from ipaddress import IPV4LENGTH
import psycopg2
import unittest
import os
import sys
from datetime import datetime
from requests import request
sys.path.insert(0, '/app')
sys.path.insert(0, '.')
sys.path.insert(0, '..')
from psycopg2.errors import UndefinedTable, InFailedSqlTransaction
from stix2 import IPv4Address, AutonomousSystem, Identity, File
from stix2 import Relationship, Incident, IPv6Address
from pyArango.connection import Connection
from pyArango.theExceptions import CreationError
from stix2arango.feed import Feed, vaccum
from stix2arango.storage import TIME_BASED, STATIC
from stix2arango.postgresql import PostgresOptimizer
from stix2arango.request import Request
from stix2arango.utils import get_database
db = 'stix2arango'
user = 'root'
pass_ = '<PASSWORD>'
host = 'localhost'
def get_number_of_table_for_feed(feed_name, cursor):
s = "SELECT"
s += " table_schema"
s += ", table_name"
s += " FROM information_schema.tables"
s += " WHERE"
s += " ("
s += " table_schema = 'public'"
s += " )"
s += " ORDER BY table_schema, table_name;"
cursor = PostgresOptimizer.postgres_conn.cursor()
cursor.execute(s)
results = [list(r)[1] for r in cursor.fetchall()]
results = [r for r in results if r.startswith(feed_name) ]
cursor.close()
return len(results)
auth = "dbname='%s' user='%s' host='%s' password='%s'" % (db, user, host, pass_)
arango_conn = get_database()
postgres_conn = psycopg2.connect(auth)
PostgresOptimizer.postgres_conn = postgres_conn
class TestMerge(unittest.TestCase):
def setUp(self) :
self.feed = Feed(
arango_conn,
'posgres_merge_test',
tags=['postgres'],
date=datetime.now(),
storage_paradigm=TIME_BASED
)
self.optimizer = PostgresOptimizer('ipv4-addr:x_ip')
self.feed.optimizers.append(self.optimizer)
self.insert_obj()
def insert_obj(self):
obj_list = []
autonomous_system_1 = AutonomousSystem(number=123, name='fake')
autonomous_system_2 = AutonomousSystem(number=124, name='fake2')
obj_list += [autonomous_system_1]
obj_list += [autonomous_system_2]
obj_list += [IPv4Address(value='192.168.127.12', belongs_to_refs=[autonomous_system_1.id])]
obj_list += [IPv4Address(value='172.16.17.32', belongs_to_refs=[autonomous_system_1.id])]
obj_list += [IPv4Address(value='192.168.3.11', belongs_to_refs=[autonomous_system_1.id])]
obj_list += [IPv4Address(value='172.16.31.10', belongs_to_refs=[autonomous_system_2.id])]
obj_list += [IPv4Address(value='172.16.58.3', belongs_to_refs=[autonomous_system_1.id])]
obj_list += [IPv4Address(value='192.168.127.12', belongs_to_refs=[autonomous_system_2.id])]
self.feed.insert_stix_object_in_arango(obj_list)
def test_obj_merge(self):
aql = 'for el in ' + self.feed.storage_paradigm.get_collection_name(self.feed) + ' return el'
results = arango_conn.AQLQuery(aql, raw_results=True)
assert(len(results) == 4)
sql = 'select * from ' + self.optimizer.table_name + ';'
cursor = PostgresOptimizer.postgres_conn.cursor()
cursor.execute(sql)
results = cursor.fetchall()
cursor.close()
assert(len(results) == 6)
request = Request(arango_conn, datetime.now())
results = request.request(" [autonomous-system:number = 124 ] ",
max_depth=1, tags=['postgres'])
assert(len(results) == 3)
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.feed = Feed(
arango_conn,
'posgres_test',
tags=['test_postgres_optimizer'],
date=datetime.now(),
storage_paradigm=TIME_BASED
)
optimized_field0 = 'ipv4-addr:value'
optimizer0 = PostgresOptimizer(optimized_field0)
optimized_field1 = 'ipv4-addr:x_ip:broadcast_addr'
optimizer1 = PostgresOptimizer(optimized_field1)
self.feed.optimizers.append(optimizer0)
self.insert()
def insert(self):
_autonomous_system = AutonomousSystem(number=1234, name='Google')
self.ipv4_1 = IPv4Address(value='172.16.31.10', belongs_to_refs=[_autonomous_system.id])
ipv4_2 = IPv4Address(value='192.168.3.11/24', belongs_to_refs=[_autonomous_system.id])
_identity = Identity(name='<NAME>', identity_class='individual')
_relation = Relationship(source_ref=_identity.id, target_ref=self.ipv4_1.id, relationship_type='attributed-to')
self.feed.insert_stix_object_in_arango([_autonomous_system,self.ipv4_1 , ipv4_2, _identity, _relation])
def test_postgres_optimizer(self):
request = Request(arango_conn, datetime.now())
results = request.request(" [ipv4-addr:value = '172.16.31.10' ] ",
max_depth=0, tags=['test_postgres_optimizer'])
dict_1 = dict(self.ipv4_1)
dict_2 = dict(results[0])
del dict_2['x_tags']
del dict_2['x_feed']
self.assertEqual(len(dict_1.keys()), len(dict_2.keys()))
self.assertEqual(dict_1, dict_2)
class TestRemoveField(unittest.TestCase):
def test_remove_field(self):
optimized_field0 = 'ipv4-addr:value'
optimizer0 = PostgresOptimizer(optimized_field0)
obj1 = {'type': 'ipv4-addr', 'value' : 'coucou'}
obj2 = {'type': 'domain-name', 'value' : 'coucou'}
r = optimizer0.delete_fields_in_object(obj1)
self.assertEqual(r, {'type': 'ipv4-addr'})
r = optimizer0.delete_fields_in_object(obj2)
self.assertEqual(r, {'type': 'domain-name', 'value': 'coucou'})
obj3 = {'type': 'ipv4-addr', 'value2' : 'coucou'}
r = optimizer0.delete_fields_in_object(obj3)
class TestVaccum(unittest.TestCase):
def setUp(self):
self.feed = Feed(
arango_conn,
'v',
tags=['postgres'],
date=datetime.now(),
storage_paradigm=TIME_BASED,
vaccum_date=datetime.fromtimestamp(10)
)
optimized_field0 = 'ipv4-addr:value'
self.optimizer0 = PostgresOptimizer(optimized_field0)
self.feed.optimizers.append(self.optimizer0)
ipv4 = IPv4Address(value='172.16.31.10/24')
self.feed.insert_stix_object_in_arango([ipv4])
def test_vaccum(self):
vaccum(arango_conn)
feeds = Feed.get_last_feeds(arango_conn, datetime(2022, 12, 12))
feeds = [f for f in feeds if 'v' == f.feed_name]
self.assertEqual(len(feeds), 0)
class StaticStorageTest(unittest.TestCase):
def setUp(self):
feed = Feed(
arango_conn,
'posgres_test_static',
tags=['postgres'],
date=datetime.now(),
storage_paradigm=STATIC
)
optimizer2 = PostgresOptimizer('ipv4-addr:x_ip')
feed.optimizers.append(optimizer2)
ipv4 = IPv4Address(value='172.16.31.10')
feed.insert_stix_object_in_arango([ipv4])
feed = Feed(
arango_conn,
'posgres_test_static',
tags=['postgres'],
date=datetime.now(),
storage_paradigm=STATIC
)
optimizer2 = PostgresOptimizer('ipv4-addr:x_ip')
feed.optimizers.append(optimizer2)
ipv4_1 = IPv4Address(value='172.16.31.10/24')
ipv4_2 = IPv4Address(value='172.16.17.32')
feed.insert_stix_object_in_arango([ipv4_1, ipv4_2])
self.feed = feed
def test_static_storage(self):
nb_tables = get_number_of_table_for_feed(
self.feed.feed_name,
PostgresOptimizer.postgres_conn.cursor()
)
self.assertEquals(nb_tables, 1)
class MatchIpOnCidrTest(unittest.TestCase):
def setUp(self):
self.feed = Feed(
arango_conn,
'postgres_test_cidr',
tags=['postgres_test_cidr'],
date=datetime.now(),
storage_paradigm=TIME_BASED
)
ipv4_1 = IPv4Address(value='172.16.31.10/24')
ipv4_2 = IPv4Address(value='172.16.17.32')
self.feed.insert_stix_object_in_arango([ipv4_1, ipv4_2])
self.pattern = "[ipv4-addr:x_ip = '172.16.17.32']"
self.request = Request(arango_conn, datetime.now())
def test_match_ip_on_cidr(self):
results = self.request.request(self.pattern,
max_depth=0, tags=['postgres_test_cidr'])
self.assertEqual(len(results), 2)
class TwoOptimizesTest(unittest.TestCase):
def setUp(self):
feed = Feed(
arango_conn,
'postgres_2optimizers',
tags=['postgres_test2'],
date=datetime.now(),
storage_paradigm=TIME_BASED
)
optimizer1 = PostgresOptimizer('ipv4-addr:x_ip')
optimizer2 = PostgresOptimizer('autonomous-system:number')
feed.optimizers.append(optimizer1)
feed.optimizers.append(optimizer2)
obj_list = []
autonomous_system_1 = AutonomousSystem(number=123, name='fake')
autonomous_system_2 = AutonomousSystem(number=124, name='fake2')
obj_list += [autonomous_system_1]
obj_list += [autonomous_system_2]
obj_list += [IPv4Address(value='192.168.127.12', belongs_to_refs=[autonomous_system_1.id])]
obj_list += [IPv4Address(value='172.16.17.32', belongs_to_refs=[autonomous_system_1.id])]
obj_list += [IPv4Address(value='192.168.3.11', belongs_to_refs=[autonomous_system_1.id])]
obj_list += [IPv4Address(value='172.16.31.10', belongs_to_refs=[autonomous_system_2.id])]
obj_list += [IPv4Address(value='172.16.58.3', belongs_to_refs=[autonomous_system_1.id])]
obj_list += [IPv4Address(value='192.168.127.12', belongs_to_refs=[autonomous_system_2.id])]
self.obj_list = obj_list
self.feed = feed
def test_insert(self):
self.feed.insert_stix_object_in_arango(self.obj_list)
def test_request(self):
request = Request(arango_conn, datetime.now())
results = request.request(" [autonomous-system:number = 124 ] ",
max_depth=1, tags=['postgres_test2'])
# raise Exception(str(results))
self.assertEqual(len(results), 3)
results = request.request(" [ipv4-addr:x_ip = '192.168.127.12' ] ",
max_depth=1, tags=['postgres_test2'])
self.assertEqual(len(results), 2)
class TestTreesObject(unittest.TestCase):
def setUp(self):
self.feed = Feed(
arango_conn,
'posgres_trees_obj',
tags=['posgres_trees_obj'],
date=datetime.now(),
storage_paradigm=STATIC
)
self.file1 = File(name='file1', hashes={
'md5':'e0323a9039add2978bf5b49550572c7c',
'sha256':'961b6dd3ede3cb8ecbaacbd68de040cd78eb2ed5889130cceb4c49268ea4d506'})
self.file2 = File(name='file2', hashes={
'md5':'1aabac6d068eef6a7bad3fdf50a05cc8',
'sha256':'3b64db95cb55c763391c707108489ae18b4112d783300de38e033b4c98c3deaf'})
self.optimizer = PostgresOptimizer('file:hashes:md5')
self.feed.optimizers.append(self.optimizer)
self.feed.insert_stix_object_in_arango([self.file1, self.file2])
def test_trees_obj(self):
feed = Feed(
arango_conn,
'posgres_trees_obj',
tags=['posgres_trees_obj'],
date=datetime.now(),
storage_paradigm=STATIC
)
file1 = File(name='file1', hashes={
'md5':'e0323a9039add2978bf5b49550572c7c',
'sha256':'961b6dd3ede3cb8ecbaacbd68de040cd78eb2ed5889130cceb4c49268ea4d506'})
file2 = File(name='file2', hashes={
'md5':'1aabac6d068eef6a7bad3fdf50a05cc8',
'sha256':'3b64db95cb55c763391c707108489ae18b4112d783300de38e033b4c98c3deaf'})
optimizer = PostgresOptimizer('file:hashes:MD5')
feed.optimizers.append(optimizer)
feed.insert_stix_object_in_arango([file1, file2])
request = Request(arango_conn, datetime.now())
results = request.request(" [file:hashes:MD5 = 'e0323a9039add2978bf5b49550572c7c'] ",
max_depth=1, tags=['posgres_trees_obj'])
self.assertEqual(len(results), 1)
dict_file = {k:v for k,v in dict(file1).items() if k not in ['x_feed', 'x_tags'] } # ! OK
dict_result0 = {k:v for k,v in results[0].items() if k not in ['x_feed', 'x_tags'] } # ! OK
self.assertEqual(dict_result0, dict_file)
cursor = PostgresOptimizer.postgres_conn.cursor()
sql = 'select * from ' + optimizer.table_name + ';'
cursor.execute(sql)
results = cursor.fetchall()
cursor.close()
PostgresOptimizer.postgres_conn.commit()
self.assertEqual(len(results), 2)
TestMerge()
``` |
{
"source": "1lch2/PythonExercise",
"score": 4
} |
#### File: PythonExercise/data_structures/bit_operation.py
```python
class Bit:
"""Bit operation solutions
"""
@staticmethod
def getSmallest2Power(x: int) -> int:
"""For the given integer x, return the smallest power of 2 that bigger than x.
"""
i = 0
# 先循环右移得到最高有效位
while x > 0:
x = x >> 1
i += 1
j = 1
# 再循环左移得到次幂
while i > 0:
j = j << 1
i -= 1
return j
@staticmethod
def hammingWeight(n: int, method=0) -> int:
"""Count the number of 1 in the binary of input integer.
Args:
n: The input integer.
method: Different method. Can be 0 or 1:
"""
count = 0
if method == 0:
# n 循环右移和 1 按位与
while n != 0:
count += n & 1
n = n >> 1
elif method == 1:
            # Count how many times n & (n-1) can be applied before n becomes 0
            #* n - 1 : the rightmost 1 becomes 0 and the 0s to its right become 1s
            #* n & (n-1): clears the rightmost 1 of n
#* e.g.:
#* n = 1010 1000
#* n-1 = 1010 0111
#* n&(n-1) = 1010 0000
while n != 0:
n = n & (n - 1)
count += 1
else:
raise(ValueError("Invalid method number, should be 0 or 1."))
return count
@classmethod
def reverseBits(cls, n: int) -> int:
"""Reverse a 32-bit unsigned integer.
"""
res = 0
        power = 31 # 32-bit unsigned integer
        while n != 0:
            #* n & 1 extracts the rightmost bit
            #* shifting it left by `power` moves it to the mirrored position on the left
res = res | ((n & 1) << power)
n = n >> 1
power -= 1
return res
@classmethod
def isPowerOfTwo(cls, n: int, method=0) -> bool:
"""Judge if the input integer is power of 2.
Args:
n: Integer.
method: method=0: using complement code method.
method=1: using bit and method.
Returns:
Bool value.
"""
if method==0:
            # In two's complement, x and -x share only the rightmost set bit
            #* e.g.:
            #* x  = 15 -> 0 0111
            #* -x = -15 -> 1 1001
            #* x & (-x) = 0 0001 != x
            #* x  = 16 -> 0 1000
            #* -x = -16 -> 1 1000
            #* x & (-x) = 0 1000 == x
if n == 0:
return False
return n & (-n) == n
elif method == 1:
            #* n & (n-1) clears the rightmost 1-bit of n
            #* a power of two has exactly one 1-bit, so the result must be 0
if n == 0:
return False
return n & (n-1) == 0
else:
raise(ValueError("Invalid method number, should be 0 or 1."))
@classmethod
def isDivisible(cls, a: int, b: int) -> bool:
"""Judge if the input integer b can be divided by a.
Args:
a: The dividend
b: The divisor
Returns:
Bool value.
"""
        #* Idea from: github.com/LinkinYoung
        #* If a = k*b, view k in binary:
        #* a / b becomes repeatedly subtracting shifted multiples of b from a
        #* until no shift is possible; what remains is the remainder, and a remainder of 0 means divisible
if a < b:
return cls.isDivisible(b, a)
while True:
            k = b # work on a copy of the divisor
            i = 0 # number of left shifts applied
            # Left-shift the divisor until its leading bit sits just below the dividend's leading bit
while (k << 1) < a:
k = k << 1
i += 1
a = a - k
            if i == 0: # no shift was possible, so the division is finished; check the remainder
return a == 0
@classmethod
def addWithoutSign(cls, a: int, b: int) -> int:
"""Calculate a + b without using "+"
"""
        #* ^ (XOR) adds without carrying
        #* & (AND) produces the carry bits
        #* formula: (a ^ b) ^ ((a & b) << 1)
        #* repeat until no carry remains
        #* Python integers behave like unbounded two's complement values
        x = 0xffffffff # 32-bit mask
        a, b = a&x, b&x # drop anything above 32 bits
        while b != 0: # stop once the carry is 0
            c = ((a & b) << 1) & x # carry
            a = a ^ b # carry-less sum
            b = c # replace b with the carry
        # 0x7fffffff is the largest positive 32-bit integer
if a <= 0x7fffffff:
return a
        # For a negative result, invert the low 32 bits (a ^ x) and then invert the whole number,
        # which yields the correct negative Python integer
else:
return ~(a ^ x)
if __name__ == "__main__":
print(Bit.getSmallest2Power(20))
print(Bit.hammingWeight(15))
print(bin(31))
print(bin(Bit.reverseBits(31)))
print(Bit.isDivisible(9, 3))
print(Bit.isDivisible(9, 2))
```
#### File: PythonExercise/data_structures/LFUcache.py
```python
class Node:
"""Dual linked list node object.
"""
def __init__(self, key, val):
self.val = val
self.key = key
self.times = 0
self.prev = None
self.next = None
def insert(self, position: "Node"):
"""Insert node object before the given position.
"""
self.next = position
self.prev = position.prev
position.prev.next = self
position.prev = self
class LFUCache:
"""LFU cache object.
The cache with less using times are closer to head.
If there are caches with same using times, the older one is closer to head.
"""
def __init__(self, capacity: int):
"""Nodes are placed in ascending order of 'times' property.
"""
self.cap = capacity
self.cache = {} # {key: pointer}
self.head = Node("#", "#")
self.tail = Node("#", "#")
self.head.next = self.tail
self.tail.prev = self.head
# Set the using times for head and tail node in order to judge and sort.
self.head.times = -1
self.tail.times = float("inf")
def get(self, key: int) -> int:
if key in self.cache:
self.cache[key].times += 1
temp = self.remove_node(self.cache[key])
self.adjust(temp)
return self.cache[key].val
else:
return -1
def put(self, key: int, value: int) -> None:
"""Update the LFU cache.
"""
if self.cap == 0:
return
if key in self.cache:
self.cache[key].times += 1 # Add using times
self.cache[key].val = value # Update value
temp = self.remove_node(self.cache[key])
self.adjust(temp) # Adjust the location of the cache
else:
if len(self.cache) >= self.cap:
temp = self.remove_node(self.head.next) # Remove node from linked list
self.cache.pop(temp.key) # Remove key-value pair from cache
# Create new node and insert.
temp = Node(key, value)
self.cache[key] = temp
self.adjust(temp)
def remove_node(self, node: Node) -> Node:
"""Remove the node from linked list and return the pointer of the node.
"""
# Remove from dual linked list
node.prev.next = node.next
node.next.prev = node.prev
return node
def adjust(self, node: Node):
"""Adjust the position of given node to suit the ascending order of times.
With the while operation, the complexity is O(n)
"""
#TODO: improve the time complexity to O(1)
temp = self.tail
# Find insert position
while node.times < temp.prev.times and temp != self.head.next:
temp = temp.prev
# Check using times and insert the node
if temp.prev.times <= node.times and node.times < temp.times:
node.insert(temp)
```
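A short illustrative run of the cache above (capacity 2); the keys and values are arbitrary:
```python
cache = LFUCache(2)
cache.put(1, 10)
cache.put(2, 20)
cache.get(1)         # returns 10 and raises key 1's use count
cache.put(3, 30)     # evicts key 2, the least frequently used entry
print(cache.get(2))  # -1
print(cache.get(1))  # 10
print(cache.get(3))  # 30
```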
#### File: leetcode/array/238.py
```python
from typing import List
class Solution:
def productExceptSelf(self, nums: List[int]) -> List[int]:
res = []
l_len = len(nums)
left = [0 for _ in range(l_len)]
left[0] = 1
right = [0 for _ in range(l_len)]
right[-1] = 1
# Calculate left side of the index.
for i in range(l_len-1):
left[i+1] = left[i] * nums[i]
# Calculate right side of the index.
for i in range(l_len-1, 0, -1):
right[i-1] = right[i] * nums[i]
for i in range(l_len):
res.append(left[i] * right[i])
return res
```
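A quick check of the prefix/suffix product idea (illustrative only): for [1, 2, 3, 4] the prefix products are [1, 1, 2, 6], the suffix products are [24, 12, 4, 1], and their element-wise product gives the answer.
```python
print(Solution().productExceptSelf([1, 2, 3, 4]))  # [24, 12, 8, 6]
```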
#### File: leetcode/array/88.py
```python
from typing import List
# Two pointers scanning from the back; the placement rule resembles a quicksort-style swap
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
i = m - 1
j = n - 1
p = len(nums1) - 1
        # Indices must stay non-negative, so 0 is included
while j >= 0 and i >= 0:
if nums2[j] > nums1[i]:
nums1[p] = nums2[j]
j -= 1
else:
                # Otherwise the current nums1 value is the larger one, so place it
                nums1[p] = nums1[i]
                i -= 1
            p -= 1
        # nums1 may be exhausted first (i has gone negative) while nums2 still has values left;
        # in that case the first j+1 slots of nums1 are filled directly from nums2
        # (p cannot be used for this)
nums1[:j+1] = nums2[:j+1]
```
#### File: leetcode/back_track/39.py
```python
from typing import List
class Solution:
def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
if target < min(candidates):
return []
res = []
def backtrack(path: list, candidates: list):
            # Stop condition 1: the target sum has been reached
            if sum(path) == target:
                res.append(path.copy()) # record a copy of the current path
                return
            # Stop condition 2: no candidates left
            if candidates == []:
                return
            # Stop condition 3: even the smallest remaining candidate overshoots the target
            if sum(path) + min(candidates) > target:
                return
            for i in range(len(candidates)):
                if candidates[i] + sum(path) > target: # skip candidates that would overshoot
                    continue
                path.append(candidates[i])
                backtrack(path, candidates[i:]) # the next level may only reuse this and later elements
path.pop()
backtrack([], candidates)
return res
if __name__ == "__main__":
S = Solution()
print(S.combinationSum([2,3,5], 8))
```
#### File: leetcode/back_track/40.py
```python
from typing import List
class Solution:
def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:
if target < min(candidates):
return []
candidates.sort()
res = []
used = [False for _ in range(len(candidates))]
def backtrack(path: list, candidates: list, used: list):
if sum(path) == target:
res.append(path.copy())
return
            # Check for an empty candidate list first, otherwise the next stop condition would fail
if candidates == []:
return
if sum(path) + min(candidates) > target:
return
for i in range(len(candidates)):
                # Skip elements that would overshoot the target
                if sum(path) + candidates[i] > target:
                    continue
                # Skip elements that have already been used
                if used[i]:
                    continue
                # Skip duplicates of the element chosen in the previous iteration
                if i > 0:
                    if candidates[i] == candidates[i-1] and not used[i]:
                        continue
used[i] = True
path.append(candidates[i])
                # Recurse on the elements after this pick, slicing the used array the same way
backtrack(path, candidates[i+1:], used[i+1:])
used[i] = False
path.pop()
backtrack([], candidates, used)
return res
if __name__ == "__main__":
S = Solution()
print(S.combinationSum2([10,1,2,7,6,1,5], 8))
```
#### File: leetcode/back_track/77.py
```python
from typing import List
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
if k < 1:
return []
        selection = [i for i in range(1, n+1)] # the numbers available to choose from
res = []
def backtrack(start: int, path: list, selection: list):
            # Stop condition: path already holds the required k elements
            if len(path) == k:
                res.append(path[:]) # append a copy so later mutations don't affect the result
                return
            # Iterate over the remaining choices
            for i in range(start, n):
                path.append(selection[i])
                # Recurse with this element excluded from the next level's choices
                backtrack(i+1, path, selection)
                path.pop() # undo this choice
backtrack(0, [], selection)
return res
```
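An illustrative call, assuming the Solution class above: all 2-element combinations drawn from 1..4.
```python
print(Solution().combine(4, 2))
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
```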
#### File: leetcode/binary_tree/100.py
```python
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
# Serialize both trees with a breadth-first traversal and compare the resulting sequences
# (time and space complexity are not great)
class Solution:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
if self.bfs(p) == self.bfs(q):
return True
else:
return False
def bfs(self, root: TreeNode) -> str:
if not root:
return []
queue = [root]
res = []
while len(queue) != 0:
current = queue.pop(0)
if current is not None:
res.append(current.val)
else:
res.append(None)
if current != None:
queue.append(current.left)
queue.append(current.right)
return res
# https://leetcode-cn.com/problems/same-tree/solution/hua-jie-suan-fa-100-xiang-tong-de-shu-by-guanpengc/
# Depth-first traversal comparing the trees node by node
class Solution2:
def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
        # Both nodes are empty
        if p is None and q is None:
            return True
        # Exactly one of the two nodes is empty
        if p is None or q is None:
            return False
        # The node values differ
        if p.val != q.val:
            return False
        # Recurse into both subtrees and AND the results;
        # any of the checks above already returned a boolean
return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
```
#### File: leetcode/binary_tree/235.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
import copy
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
if p.val > root.val and q.val > root.val:
return self.lowestCommonAncestor(root.right, p, q)
elif p.val < root.val and q.val < root.val:
return self.lowestCommonAncestor(root.left, p, q)
else:
return root
```
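A small illustrative check on a three-node BST (values chosen arbitrarily):
```python
#     6
#    / \
#   2   8
root = TreeNode(6)
root.left, root.right = TreeNode(2), TreeNode(8)
print(Solution().lowestCommonAncestor(root, root.left, root.right).val)  # 6
print(Solution().lowestCommonAncestor(root, root.left, root.left).val)   # 2
```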
#### File: leetcode/binary_tree/297.py
```python
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Codec:
def serialize(self, root):
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
if root is None:
return "[]"
queue = [root]
res = []
while len(queue) != 0:
current = queue[0]
if isinstance(current, TreeNode):
res.append(current.val)
else:
res.append(current)
queue.pop(0)
            if current != None: # enqueue children, including None placeholders
queue.append(current.left)
queue.append(current.right)
# Remove the None in tail.
for i in range(len(res)-1, -1, -1):
if res[i] != None:
res = res[:i+1]
break
return str(res)
def deserialize(self, data):
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
        l = eval(data) # parse the serialized list string
if len(l) == 0:
return None
root = TreeNode(l[0]) # root node
        queue = [root] # build the queue and push the root
        i = 0
        # Attach nodes breadth-first
        # (the same logic as a BFS traversal of the tree)
        # current is the parent being filled; the index walks the list and non-None children are attached
while len(queue) != 0:
current = queue.pop(0)
            i += 1 # left child
if i < len(l):
if l[i] != None:
current.left = left = TreeNode(l[i])
queue.append(left)
            i += 1 # right child
if i < len(l):
if l[i] != None:
current.right = right = TreeNode(l[i])
queue.append(right)
return root
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
if __name__ == "__main__":
# Test case: root
# 0
# / \
# 1 2
# / \ \
# 3 4 5
# / \
# 6 7
# /
# 8
root = TreeNode(0)
node1 = TreeNode(1)
node2 = TreeNode(2)
node3 = TreeNode(3)
node4 = TreeNode(4)
node5 = TreeNode(5)
node6 = TreeNode(6)
node7 = TreeNode(7)
node8 = TreeNode(8)
root.left = node1
root.right = node2
node1.left = node3
node1.right = node4
node2.right = node5
node5.left = node6
node5.right = node7
node7.left = node8
codec = Codec()
tree_str = codec.serialize(root)
tree = codec.deserialize(tree_str)
print(tree_str)
queue = [tree]
while len(queue) != 0:
current = queue.pop(0)
print(current.val, end=" -> ")
if current.left:
queue.append(current.left)
if current.right:
queue.append(current.right)
```
#### File: leetcode/binary_tree/98.py
```python
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# Time: 89%, Memory: 67%
class Solution:
def isValidBST(self, root: TreeNode) -> bool:
if root is None:
return True
return self.judgeTree(root)
def judgeTree(self, root: TreeNode, lower=float("-inf"), upper=float("inf")) -> bool:
        #* Give each node default lower/upper bounds to compare against
        #* and check every node against the bounds inherited from its ancestors
        #* e.g.:
        #* a left child must be smaller than its parent, so the parent value becomes
        #* the upper bound while the lower bound stays at -inf
        # An empty leaf position is trivially a valid BST
if root is None:
return True
        # Otherwise check this node's value against its bounds
rootVal = root.val
if rootVal <= lower or rootVal >= upper:
return False
        # Recurse into the left and right subtrees with tightened bounds
return self.judgeTree(root.left, lower, rootVal) and self.judgeTree(root.right, rootVal, upper)
if __name__ == "__main__":
S = Solution()
root = TreeNode(1)
l1 = TreeNode(1)
root.left = l1
print(S.isValidBST(root))
```
#### File: leetcode/bit_operation/231.py
```python
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
        # In two's complement, x and -x share only the rightmost set bit
        #* e.g.:
        #* x  = 15 -> 0 0111
        #* -x = -15 -> 1 1001
        #* x & (-x) = 0 0001 != x
        #* x  = 16 -> 0 1000
        #* -x = -16 -> 1 1000
        #* x & (-x) = 0 1000 == x
if n == 0:
return False
return n & (-n) == n
class Solution2:
    def isPowerOfTwo(self, n: int) -> bool:
        #* n & (n-1) clears the rightmost 1-bit of n
        #* a power of two has exactly one 1-bit, so the result must be 0
if n == 0:
return False
return n & (n-1) == 0
```
#### File: leetcode/bit_operation/JZ56-1.py
```python
from typing import List
class Solution:
    def singleNumbers(self, nums: List[int]) -> List[int]:
        # XOR all numbers: the result equals a ^ b for the two values that appear only once
xor = 0
for i in nums:
xor = xor ^ i
        # Take the lowest bit that is set in the XOR result:
        # a bit that is 1 there must differ between the two unique numbers,
        # so it can be used to split all numbers into two groups
        mask = 1
        while xor & mask == 0:
            mask = mask << 1
        # XOR within each group, split by the AND with mask
        m, n = 0, 0
        for i in nums:
            if i & mask == 0: # compare with 0, not 1: the AND is either 0 or mask
m = m ^ i
else:
n = n ^ i
return [m, n]
```
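An illustrative run, assuming the Solution class above: 2 and 3 each appear twice, so the two values that appear once are returned.
```python
print(Solution().singleNumbers([1, 2, 2, 3, 3, 4]))  # [4, 1]
```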
#### File: leetcode/bit_operation/JZ56-2.py
```python
from typing import List
# LeetCode 137
class Solution:
def singleNumber(self, nums: List[int]) -> int:
        bits = [0 for x in range(32)] # how many 1s appear in each of the 32 bit positions
        for num in nums:
            j = 0
            while j < 32: # right-shift the number through all 32 bits, counting 1s per position
                bits[j] += num & 1
                num = num >> 1
                j += 1
        res = 0
        for i in range(32):
            bits[i] %= 3 # modulo 3 removes the contribution of numbers appearing three times
            res += bits[i] * (2**i) # accumulate the binary digits back into a decimal value
return res
if __name__ == "__main__":
S = Solution()
print(S.singleNumber([2,3,3,3]))
print(S.singleNumber([1,3,3,3,4,4,4]))
```
#### File: leetcode/linked_list/203.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Iterative O(n) traversal with a dummy head
# Time: 24%, Memory: 71%
class Solution:
def removeElements(self, head: ListNode, val: int) -> ListNode:
if head is None:
return None
res = ListNode("#")
res.next = head
temp = res
while temp.next != None:
            # If the next node matches, unlink it; temp itself does not move
            if temp.next.val == val:
                temp.next = temp.next.next
            else: # no match: advance one position
                temp = temp.next
        return res.next
# Recursive variant
# (somehow even slower)
class Solution2:
def removeElements(self, head: ListNode, val: int) -> ListNode:
        # Stop condition: the current head is empty
        if head is None:
            return None
        # Recurse on the sub-list that follows the head
        res = self.removeElements(head.next, val)
        # Per-level processing
        #* the list is reconnected from the tail back towards the front
        #* # -> 0 -> 1 -> 2 -> 3 -> 4 -> ^
        #* head                     res
        # If the head matches, skip it and pass res up unchanged
        if head.val == val:
            return res
        # Otherwise reattach the processed sub-list behind the current head
else:
head.next = res
return head
```
#### File: leetcode/linked_list/237.py
```python
class Solution:
def delete_node(self, node):
# Idea: Delete the next node, but perserve the value of its next node to achieve the same result.
node.val = node.next.val
node.next = node.next.next
```
#### File: leetcode/linked_list/24.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# Time: 50%, Memory: 33%
class Solution:
def swapPairs(self, head: ListNode) -> ListNode:
if not head:
return None
res = ListNode("#") # Dummy head
res.next = head
head = res
while head.next.next != None:
head = self.swapTwo(head)
            # Before the next iteration, make sure at least two nodes remain,
            # so .next is never taken on None
            if head.next is None:
                break
        return res.next
    # Swap one pair of adjacent nodes
def swapTwo(self, head: ListNode) -> ListNode:
# 0->1->2->3->4->^
first = head.next
second = head.next.next
head.next = second
first.next = second.next
second.next = first
return first
```
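An illustrative run of the pairwise swap above: 1 -> 2 -> 3 -> 4 becomes 2 -> 1 -> 4 -> 3.
```python
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
node = Solution().swapPairs(head)
while node:
    print(node.val, end=' ')  # 2 1 4 3
    node = node.next
```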
#### File: leetcode/linked_list/25.py
```python
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def reverseKGroup(self, head: 'ListNode', k: int) -> 'ListNode':
self.k = k
p = ListNode(-1)
res = p
p.next = head
q = p
# Get length
temp = head
l_list = 1
while temp.next:
temp = temp.next
l_list += 1
for j in range(l_list // k):
for i in range(k+1):
q = q.next
p = self.reverseList(p, q)
q = p
return res.next
def reverseList(self, head, tail):
_tail = tail
p = head.next
        # Tail insertion: move the node at p to the back of the reversed segment
while p != tail:
head.next = p.next
p.next = _tail
_tail = p
p = head.next
        # Reconnect head to the start of the reversed segment
head.next = _tail
for i in range(self.k):
head = head.next
return head
if __name__ == "__main__":
l0 = ListNode(0)
l1 = ListNode(1)
l2 = ListNode(2)
l3 = ListNode(3)
l4 = ListNode(4)
l5 = ListNode(5)
l0.next = l1
l1.next = l2
l2.next = l3
l3.next = l4
l4.next = l5
s = Solution()
res = s.reverseKGroup(l0, 2)
temp = res
while temp:
print(temp.val)
temp = temp.next
```
#### File: leetcode/search/57.py
```python
from typing import List
# Time: 10%, Memory: 100%
class Solution:
def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
if not intervals and not newInterval:
return []
if not newInterval:
return intervals
intervals.append(newInterval)
intervals.sort(key=lambda x: x[0])
# Same code as 56.
i = 0
l = len(intervals)
while i < l - 1:
current = intervals[i]
nextt = intervals[i+1]
            # Case analysis on how the two intervals overlap
if nextt[0] >= current[0] and nextt[0] <= current[1] and nextt[1] > current[1]:
intervals.pop(i)
intervals.pop(i)
intervals.insert(i, [current[0], nextt[1]])
elif current[0] <= nextt[0] and current[1] >= nextt[1]:
intervals.pop(i+1)
elif nextt[1] <= current[1] and nextt[1] >= current[0] and nextt[0] < current[0]:
intervals.pop(i)
intervals.pop(i)
intervals.insert(i, [nextt[0], current[1]])
else:
i += 1
l = len(intervals)
return intervals
# Time: 45%, Memory: 100%
class Solution2:
def insert(self, intervals: List[List[int]], newInterval: List[int]) -> List[List[int]]:
if not intervals and not newInterval:
return []
if not intervals:
return [newInterval]
if not newInterval:
return intervals
i = self.binsearch(intervals, newInterval)
l = len(intervals)
while i < l - 1:
current = intervals[i]
nextt = intervals[i+1]
            # Case analysis on how the two intervals overlap
if nextt[0] >= current[0] and nextt[0] <= current[1] and nextt[1] > current[1]:
intervals.pop(i)
intervals.pop(i)
intervals.insert(i, [current[0], nextt[1]])
elif current[0] <= nextt[0] and current[1] >= nextt[1]:
intervals.pop(i+1)
elif nextt[1] <= current[1] and nextt[1] >= current[0] and nextt[0] < current[0]:
intervals.pop(i)
intervals.pop(i)
intervals.insert(i, [nextt[0], current[1]])
else:
i += 1
l = len(intervals)
return intervals
def binsearch(self, seq: List[List[int]], target: List[int]) -> int:
if target[0] > seq[len(seq)-1][0]:
seq.append(target)
if target[0] < seq[0][0]:
seq.insert(0, target)
low = 0
high = len(seq) - 1
l = high - low + 1
mid = (low + high) // 2
while l > 1:
if target[0] < seq[mid][0]:
high = mid
elif target[0] > seq[mid][0]:
low = mid
elif target[0] == seq[mid][0]:
seq.insert(mid+1, target)
return mid
l = high - low
mid = (low + high) // 2
if seq[mid][0] > target[0]:
seq.insert(mid, target)
else:
seq.insert(mid+1, target)
return mid
if __name__ == "__main__":
intervals = [[2,3],[4,5]]
newInterval = [0,3]
S = Solution2()
# print(S.binsearch(intervals, newInterval))
# print(intervals)
print(S.insert(intervals, newInterval))
```
#### File: leetcode/sort/350.py
```python
from typing import List
import collections
# The goal is the multiset intersection (duplicates kept), not an ordered intersection
# Credit: LeetCode/RUMIF
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
        # Count the elements of each array into a dict: element -> occurrences,
        # then take the minimum count for every key the two share
        num1 = collections.Counter(nums1)
        num2 = collections.Counter(nums2)
        num = num1 & num2
        return list(num.elements())
# Sort both arrays, then intersect with two pointers
class Solution2:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
self.qsort(nums1, 0, len(nums1)-1)
self.qsort(nums2, 0, len(nums2)-1)
i, j = 0, 0
res = []
while i < len(nums1) and j < len(nums2):
if nums1[i] < nums2[j]:
i += 1
elif nums1[i] > nums2[j]:
j += 1
else:
res.append(nums1[i])
i += 1
j += 1
return res
# Quick sort
def qsort(self, seq: list, low: int, high: int):
i = low
j = high
if low < high:
base = seq[i]
while i < j:
while i < j and seq[j] > base:
j -= 1
if i < j:
seq[i] = seq[j]
i += 1
while i < j and seq[i] < base:
i += 1
if i < j:
seq[j] = seq[i]
j -= 1
seq[i] = base
self.qsort(seq, low, i-1)
self.qsort(seq, i+1, high)
```
#### File: leetcode/stack/232.py
```python
class MyQueue:
def __init__(self):
"""
Initialize your data structure here.
"""
self.s1 = []
self.s2 = []
def push(self, x: int) -> None:
"""
Push element x to the back of queue.
"""
self.s1.append(x)
def pop(self) -> int:
"""
Removes the element from in front of queue and returns that element.
"""
if not self.s2:
while self.s1:
self.s2.append(self.s1.pop())
return self.s2.pop()
def peek(self) -> int:
"""
Get the front element.
"""
if not self.s2:
while self.s1:
self.s2.append(self.s1.pop())
res = self.s2.pop()
self.s2.append(res)
return res
def empty(self) -> bool:
"""
Returns whether the queue is empty.
"""
return len(self.s1) == 0 and len(self.s2) == 0
```
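An illustrative run of the two-stack queue above:
```python
q = MyQueue()
q.push(1)
q.push(2)
print(q.peek())   # 1
print(q.pop())    # 1
print(q.empty())  # False
```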
#### File: leetcode/string/1513.py
```python
class Solution:
def numSub(self, s: str) -> int:
if not s or "1" not in s:
return 0
count = 0
ones = s.split("0")
for i in ones:
if i == "":
continue
count += self.calculate(len(i))
return int(count % (1e9+7))
    # Formula for the number of contiguous substrings of a run of n ones
# f(n) = n^2 - f(n-1)
def calculate(self, num: int) -> int:
f_1 = 1
i = 1
while i != num:
i += 1
f_1 = i**2 - f_1
return f_1
```
#### File: leetcode/string/151.py
```python
class Solution:
def reverseWords(self, s: str) -> str:
stack = []
temp = ""
i = 0
while i < len(s):
if s[i] != " ":
temp += str(s[i])
if s[i] == " " and temp != "":
stack.insert(0, temp)
temp = ""
i += 1
if temp != "":
stack.insert(0, temp)
return " ".join(stack)
```
#### File: leetcode/string/165.py
```python
class Solution:
def compareVersion(self, version1: str, version2: str) -> int:
version1 = version1.split(".")
version2 = version2.split(".")
l1 = len(version1)
l2 = len(version2)
        # Pad with '0' so both version lists have the same length
if l1 > l2:
version2.extend(['0' for _ in range(l1 - l2)])
elif l2 > l1:
version1.extend(['0' for _ in range(l2 - l1)])
        # Compare field by field
for i1, i2 in zip(version1, version2):
if int(i1) > int(i2):
return 1
elif int(i1) < int(i2):
return -1
return 0
```
#### File: leetcode/string/3.py
```python
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
l = len(s)
if l <= 1:
return l
ch_set = set()
right = -1 # Starts at the left of the left index
length = 0
# Move left index
for i in range(l):
# Skip the first iteration.
if i != 0:
ch_set.remove(s[i-1]) # Remove last character
# Move the right index
# Break the loop when encounters existing character.
while right + 1 < l and s[right+1] not in ch_set: # Judge the NEXT character BEFORE entering the loop
ch_set.add(s[right+1]) # Add next character
right += 1
# Get the maxmium length of sub-sequence.
length = max(length, right - i + 1)
return length
```
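Illustrative runs of the sliding-window scan above, using the classic test strings:
```python
print(Solution().lengthOfLongestSubstring("abcabcbb"))  # 3 ("abc")
print(Solution().lengthOfLongestSubstring("bbbbb"))     # 1 ("b")
print(Solution().lengthOfLongestSubstring("pwwkew"))    # 3 ("wke")
```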
#### File: leetcode/string/9.py
```python
class Solution:
def isPalindrome(self, x: int) -> bool:
return str(x) == "".join(reversed(str(x)))
# The conventional two-pointer approach
class Solution2:
def isPalindrome(self, x: int) -> bool:
x = str(x)
i, j = 0, len(x) - 1
        while i < j: # stop once the pointers meet or cross
if x[i] != x[j]:
return False
else:
i += 1
j -= 1
return True
``` |
{
"source": "1liale/NameThat-",
"score": 2
} |
#### File: migration/versions/ef75d5fd23da_create_user_and_picture_tables.py
```python
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
# revision identifiers, used by Alembic.
revision = 'ef75d5fd23da'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
sa.Column('username', sa.String(length=20), nullable=False),
sa.Column('email', sa.UnicodeText(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id'),
sa.UniqueConstraint('username')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_table('picture',
sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
sa.Column('storage_file_id', sa.String(length=60), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=False),
sa.Column('user_id', sqlalchemy_utils.types.uuid.UUIDType(), nullable=False),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('id'),
sa.UniqueConstraint('storage_file_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('picture')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
```
#### File: backend/fish/routes.py
```python
from flask import request
from flask.wrappers import Response
import io
import json
import logging
from functools import wraps
from keras.preprocessing.image import img_to_array
import numpy as np
from PIL import Image
import tensorflow as tf
from fish import app
from backend.database.orm.models import User, Picture
from backend.database.orm.session_context import SessionContext
from backend.model.fish_identifier import make_model
model = make_model(n_classes=20)
latest = tf.train.latest_checkpoint('./model/trained_weights')
model.load_weights(latest)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fish = ['Atlantic Salmon', 'Bluegill', 'Brook Trout', 'Channel Catfish', 'Chinook Salmon', 'Crappie', 'Flathead Catfish', 'Lake Sturgeon', 'Sea Lamprey', 'Largemouth Bass', 'Muskellunge', 'Northern Pike', 'Not Recognized', 'Rainbow Trout', 'Rock Bass', 'Smallmouth Bass', 'Sunfish', 'Walleye', 'White Perch', 'Yellow Perch']
def exception_handler(func):
@wraps(func)
def _exception_handler(*args, **kwargs):
try:
result = func(*args, **kwargs)
except Exception as e:
logger.exception(e)
return Response(json.dumps({'error': 'unexpected_error', 'message': str(e)}),
status=401,
content_type='application/json')
return result
return _exception_handler
@app.route('/', methods=['GET', 'POST'])
def handle_health_check():
"""Return response 200 for successful health check"""
return Response(status=200)
@app.route("/user/<user_id>", methods=["GET"])
@exception_handler
def get_user(user_id):
with SessionContext() as session:
user = session.get_first(User, id=user_id)
logger.info('Successfully fetched the user')
user_return = {'id': str(user.id), 'username': user.username, 'email': user.email, 'pictures': user.pictures}
return Response(json.dumps(user_return),
status=200,
                        content_type='application/json')
@app.route("/user/create", methods=["POST"])
@exception_handler
def create_user():
data = request.get_json()
with SessionContext() as session:
if data is not None:
session.create(User(**data))
logger.info('Successfully created the user')
return Response(status=200)
else:
            raise ValueError('Query must be provided in the request body')
@app.route("/user/update/<user_id>", methods=["POST"])
@exception_handler
def update_user(user_id):
data = request.get_json()
with SessionContext() as session:
if data is not None:
session.update_any(User, User(**data), id=user_id)
logger.info('Successfully updated the user')
return Response(status=200)
else:
            raise ValueError('Query must be provided in the request body')
@app.route("/user/delete/<user_id>", methods=["POST"])
@exception_handler
def delete_user(user_id):
with SessionContext() as session:
session.delete_any(User, id=user_id)
logger.info('Successfully deleted the user')
return Response(status=200)
@app.route("/predict", methods=["POST"])
@exception_handler
def predict():
if request.method == 'POST':
image = request.files['fish'].read()
image = Image.open(io.BytesIO(image))
if image.mode != 'RGB':
image = image.convert('RGB')
image = image.resize((512, 512))
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
prediction = model.predict(image)[0]
        max_idx = np.argmax(prediction)
        predicted_fish = fish[max_idx]
return Response(json.dumps({'prediction': predicted_fish}),
status=200,
                        content_type='application/json')
``` |
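A hypothetical client call against a locally running instance of this service; the host, port and image file name are assumptions, not part of the code above:
```python
import requests

with open('walleye.jpg', 'rb') as f:  # any local image file
    resp = requests.post('http://localhost:5000/predict', files={'fish': f})
print(resp.status_code, resp.json())  # e.g. 200 {'prediction': 'Walleye'}
```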
{
"source": "1littlelamb/Dabdub_ecQuestions",
"score": 3
} |
#### File: Dabdub_ecQuestions/2017/erdosCalc.py
```python
from tqdm import tqdm
def primeFactorization(n):
divisors = [ d for d in range(2,n//2+1) if n % d == 0 ]
return [ d for d in divisors if \
all( d % od != 0 for od in divisors if od != d ) ]
howMany = input('\n' + 'How many Erdos-Woods numbers would you like to find? ' + '\n' + '\n' +\
'NOTICE: THIS ALGORITHM REQUIRES A LARGE AMOUNT OF COMPUTE POWER!' + '\n' + '\n' +\
'Enter the number here: ')
erdos_nums = []
while len(erdos_nums) < int(howMany):
for k in tqdm(range(24)):
if k < 2:
continue
a_list = []
min_extrema = 0
max_extrema = 0
primes_min = []
primes_max = []
primes_master = []
for a in range(2400):
if a <= k:
continue
a_list = [a+i for i in range(k+1)]
primes_min = primeFactorization(a_list[0])
primes_max = primeFactorization(a_list[-1])
primes_master = (primes_max + primes_min)
if all(any(elem in primes_master for elem in primeFactorization(i)) for i in a_list[1:-1]):
print(f'Erdos-Woods found: k={k} a={a}')
erdos_nums.append(k)
break
``` |
{
"source": "1lo0/pexcel_openpyxl",
"score": 4
} |
#### File: pexcel_openpyxl/common/util.py
```python
class ExcelUtil:
"""
    Excel utility class
"""
@classmethod
def get_letter_list(self, uppered = True):
"""
        Get the 26 letters of the English alphabet.
        :param uppered: return upper-case letters (default True)
        :return: list of the 26 letters
"""
lower_case_list = [chr(i) for i in range(97, 123)]
if(uppered):
upper_case_list = []
for i in lower_case_list:
upper_case_list.append(i.upper())
return upper_case_list
return lower_case_list
    def tuple_empty(self, t):
        """
        Check whether a tuple is empty.
        (The 't' parameter is an assumption; the original stub declared no argument and had no body.)
        :return: True if t is None or contains no elements
        """
        return not t
```
#### File: pexcel_openpyxl/style/default.py
```python
from openpyxl.styles import PatternFill, Border, Side, Alignment, Protection, Font,NamedStyle
class ExcelFontStyle:
"""
    Excel font styles and fills
"""
__default_style__ = NamedStyle('default')
__default_style__.border = Border(left=Side(border_style='thin'),
right=Side(border_style='thin'),
top=Side(border_style='thin'),
bottom=Side(border_style='thin'))
__default_style__.font = Font(size=12)
__default_style__.alignment = Alignment(horizontal='center', vertical='center', )
# header_style
__header_style__ = NamedStyle("header_style")
__header_style__.font = Font(bold=True, size=18)
__header_style__.fill = PatternFill(fill_type='solid',
start_color='00C0C0C0',
end_color='00C0C0C0',)
__header_style__.border = Border(left=Side(border_style='thin'),
right=Side(border_style='thin'),
top=Side(border_style='thin'),
bottom=Side(border_style='thin'))
__header_style__.alignment = Alignment(horizontal='center', vertical='center', )
__header_style__.number_format = 'General'
#openpyxl默认样式
__openpyxl_default_style__ = NamedStyle("openpyxl_style")
__openpyxl_default_style__.font = Font(name='Calibri',
size=11,
bold=False,
italic=False,
vertAlign=None,
underline='none',
strike=False,
color='FF000000')
__openpyxl_default_style__.fill = PatternFill(fill_type=None,
start_color='FFFFFFFF',
end_color='FF000000')
__openpyxl_default_style__.border = Border(left=Side(border_style=None,
color='FF000000'),
right=Side(border_style=None,
color='FF000000'),
top=Side(border_style=None,
color='FF000000'),
bottom=Side(border_style=None,
color='FF000000'),
diagonal=Side(border_style=None,
color='FF000000'),
diagonal_direction=0,
outline=Side(border_style=None,
color='FF000000'),
vertical=Side(border_style=None,
color='FF000000'),
horizontal=Side(border_style=None,
color='FF000000')
)
__openpyxl_default_style__.alignment = Alignment(horizontal='general',
vertical='bottom',
text_rotation=0,
wrap_text=False,
shrink_to_fit=False,
indent=0)
__openpyxl_default_style__.number_format = 'General'
__openpyxl_default_style__.protection = Protection(locked=True,
hidden=False)
@classmethod
def get_default_style(self):
"""
        Cell format with default font and fill.
        #TODO table styles and filtering
        :return: the shared default NamedStyle
"""
return self.__default_style__
@classmethod
def get_openpyxl_default_style(self):
return self.__openpyxl_default_style__
@classmethod
def get_header_style(self):
return self.__header_style__
``` |
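An illustrative way to apply these shared styles with openpyxl; the sheet contents and file name are arbitrary:
```python
from openpyxl import Workbook

wb = Workbook()
wb.add_named_style(ExcelFontStyle.get_header_style())
wb.add_named_style(ExcelFontStyle.get_default_style())
ws = wb.active
ws['A1'] = 'Report'
ws['A1'].style = 'header_style'  # refer to the registered NamedStyle by name
ws['A2'] = 42
ws['A2'].style = 'default'
wb.save('styled.xlsx')
```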
{
"source": "1lol/Calibration-less_pMRI_optimal_control",
"score": 2
} |
#### File: 1lol/Calibration-less_pMRI_optimal_control/Utils.py
```python
import tensorflow as tf
import numpy as np
import sys
sys.path.append('../')
from scipy.io import savemat
import os
import scipy.misc
#from spectrum import fftshift
#from tensorflow.python.ops.signal.helper import fftshift
#from tensorflow.python.ops.signal.helper import ifftshift
#from tensorflow.python import roll as _roll
#from tensorflow.python.framework import ops
#from tensorflow.python.util.tf_export import tf_export
def fft2c(img):
""" Centered fft2 """
return np.fft.fft2(img) / np.sqrt(img.shape[-2]*img.shape[-1])
def ifft2c(img):
""" Centered ifft2 """
return np.fft.ifft2(img) * np.sqrt(img.shape[-2]*img.shape[-1])
def mriAdjointOp(rawdata, coilsens, mask):
""" Adjoint MRI Cartesian Operator """
mask = np.expand_dims( mask.astype(np.float32), axis=1)
return np.sum(ifft2c(rawdata * mask)*np.conj(coilsens), axis=1)
def mriForwardOp(img, coilsens, mask):
""" Forward MRI Cartesian Operator """
# mask = np.expand_dims( mask.astype(np.float32), axis=1)
# img = np.expand_dims( img, axis=1)
#
return fft2c(coilsens * img)*mask
def fftshift(x, axes=None):
"""
Shift the zero-frequency component to the center of the spectrum.
This function swaps half-spaces for all axes listed (defaults to all).
Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
Parameters
----------
x : array_like, Tensor
Input array.
axes : int or shape tuple, optional
Axes over which to shift. Default is None, which shifts all axes.
Returns
-------
y : Tensor.
"""
x = tf.convert_to_tensor(x)
if axes is None:
axes = tuple(range(tf.rank(x)))
shift = [dim // 2 for dim in x.shape]
elif isinstance(axes, int):
shift = x.shape[axes] // 2
else:
shift = [x.shape[ax] // 2 for ax in axes]
return tf.roll(x, shift, axes)
#
##@tf_export("signal.ifftshift")
def ifftshift(x, axes=None):
"""
The inverse of `fftshift`. Although identical for even-length `x`, the
functions differ by one sample for odd-length `x`.
Parameters
----------
x : array_like, Tensor.
axes : int or shape tuple, optional
Axes over which to calculate. Defaults to None, which shifts all axes.
Returns
-------
y : Tensor.
"""
#x = ops.convert_to_tensor_v2(x)
if axes is None:
        axes = tuple(range(tf.keras.backend.ndim(x)))
shift = [-(dim // 2) for dim in x.shape]
elif isinstance(axes, int):
shift = -(x.shape[axes] // 2)
else:
shift = [-(x.shape[ax] // 2) for ax in axes]
return tf.roll(x, shift, axes)
def ifftc2d(inp):
""" Centered inverse 2d Fourier transform, performed on axis (-1,-2).
"""
shape = tf.shape(inp)
numel = shape[-2]*shape[-1]
scale = tf.sqrt(tf.cast(numel, tf.float32))
#out = fftshift(tf.ifft2d(ifftshift(inp, axes= None)), axes= None)
out = tf.ifft2d(inp)
out = tf.complex(tf.real(out)*scale, tf.imag(out)*scale)
return out
def fftc2d(inp):
""" Centered 2d Fourier transform, performed on axis (-1,-2).
"""
shape = tf.shape(inp)
numel = shape[-2]*shape[-1]
scale = 1.0 / tf.sqrt(tf.cast(numel, tf.float32))
#out = fftshift(tf.fft2d(ifftshift(inp, axes= None)), axes= None)
out = tf.fft2d(inp)
out = tf.complex(tf.real(out) * scale, tf.imag(out) * scale)
return out
def removeFEOversampling(src):
""" Remove Frequency Encoding (FE) oversampling.
This is implemented such that they match with the DICOM images.
"""
assert src.ndim >= 2
nFE, nPE = src.shape[-2:]
if nPE != nFE:
return np.take(src, np.arange(int(nFE*0.25)+1, int(nFE*0.75)+1), axis=-2)
else:
return src
def removePEOversampling(src):
""" Remove Phase Encoding (PE) oversampling. """
nPE = src.shape[-1]
nFE = src.shape[-2]
PE_OS_crop = (nPE - nFE) / 2
if PE_OS_crop == 0:
return src
else:
return np.take(src, np.arange(int(PE_OS_crop)+1, nPE-int(PE_OS_crop)+1), axis=-1)
def removeFE(src):
assert src.ndim >= 2
nFE, nPE = src.shape[-2:]
return np.take(src, np.arange(int(nFE*0.25)+1, int(nFE*0.75)+1), axis=-2)
def removePE(src):
nPE = src.shape[-1]
nFE = src.shape[-2]
PE_OS_crop = (nPE - nFE) / 2
return np.take(src, np.arange(int(PE_OS_crop)+1, nPE-int(PE_OS_crop)+1), axis=-1)
def ssim(input, target, ksize=11, sigma=1.5, L=1.0):
def ssimKernel(ksize=ksize, sigma=sigma):
if sigma == None: # no gauss weighting
kernel = np.ones((ksize, ksize, 1, 1)).astype(np.float32)
else:
x, y = np.mgrid[-ksize // 2 + 1:ksize // 2 + 1, -ksize // 2 + 1:ksize // 2 + 1]
kernel = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))
kernel = kernel[:, :, np.newaxis, np.newaxis].astype(np.float32)
return kernel / np.sum(kernel)
kernel = tf.Variable(ssimKernel(), name='ssim_kernel', trainable=False)
K1 = 0.01
K2 = 0.03
C1 = (K1 * L) ** 2
C2 = (K2 * L) ** 2
mu1 = tf.nn.conv2d(input, kernel, strides=[1, 1, 1, 1], padding='VALID', data_format='NHWC')
mu2 = tf.nn.conv2d(target, kernel, strides=[1, 1, 1, 1], padding='VALID', data_format='NHWC')
mu1_sqr = mu1 ** 2
mu2_sqr = mu2 ** 2
mu1mu2 = mu1 * mu2
sigma1_sqr = tf.nn.conv2d(input * input, kernel, strides=[1, 1, 1, 1], padding='VALID',
data_format='NHWC') - mu1_sqr
sigma2_sqr = tf.nn.conv2d(target * target, kernel, strides=[1, 1, 1, 1], padding='VALID',
data_format='NHWC') - mu2_sqr
sigma12 = tf.nn.conv2d(input * target, kernel, strides=[1, 1, 1, 1], padding='VALID', data_format='NHWC') - mu1mu2
ssim_maps = ((2.0 * mu1mu2 + C1) * (2.0 * sigma12 + C2)) / ((mu1_sqr + mu2_sqr + C1) *
(sigma1_sqr + sigma2_sqr + C2))
return tf.reduce_mean(tf.reduce_mean(ssim_maps, axis=(1, 2, 3)))
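# --- Illustrative usage sketch (added; not part of the original module) ---
# Evaluate the SSIM graph op above on random NHWC images. Graph-mode/session
# execution is assumed here, matching the TF1-style tf.* calls used above.
def _example_ssim():
    ref = np.random.rand(2, 64, 64, 1).astype(np.float32)
    noisy = np.clip(ref + 0.05 * np.random.randn(2, 64, 64, 1), 0.0, 1.0).astype(np.float32)
    score = ssim(tf.constant(ref), tf.constant(noisy), L=1.0)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        return sess.run(score)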
def saveAsMat(img, filename, matlab_id, mat_dict=None):
""" Save mat files with ndim in [2,3,4]
Args:
img: image to be saved
file_path: base directory
matlab_id: identifer of variable
mat_dict: additional variables to be saved
"""
assert img.ndim in [2, 3, 4]
img_arg = img.copy()
if img.ndim == 3:
img_arg = np.transpose(img_arg, (1, 2, 0))
elif img.ndim == 4:
img_arg = np.transpose(img_arg, (2, 3, 0, 1))
    if mat_dict is None:
mat_dict = {matlab_id: img_arg}
else:
mat_dict[matlab_id] = img_arg
dirname = os.path.dirname(filename) or '.'
if not os.path.exists(dirname):
os.makedirs(dirname)
savemat(filename, mat_dict)
def _normalize(img):
""" Normalize image between [0, 1] """
tmp = img - np.min(img)
tmp /= np.max(tmp)
return tmp
def contrastStretching(img, saturated_pixel=0.004):
""" constrast stretching according to imageJ
http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm"""
values = np.sort(img, axis=None)
nr_pixels = np.size(values)
lim = int(np.round(saturated_pixel*nr_pixels))
v_min = values[lim]
v_max = values[-lim-1]
img = (img - v_min)*(255.0)/(v_max - v_min)
img = np.minimum(255.0, np.maximum(0.0, img))
return img
def getContrastStretchingLimits(img, saturated_pixel=0.004):
""" constrast stretching according to imageJ
http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm"""
values = np.sort(img, axis=None)
nr_pixels = np.size(values)
lim = int(np.round(saturated_pixel*nr_pixels))
v_min = values[lim]
v_max = values[-lim-1]
return v_min, v_max
def normalize(img, v_min, v_max, max_int=255.0):
""" normalize image to [0, max_int] according to image intensities [v_min, v_max] """
img = (img - v_min)*(max_int)/(v_max - v_min)
img = np.minimum(max_int, np.maximum(0.0, img))
return img
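# --- Illustrative usage sketch (added; not part of the original module) ---
# Typical combination of the two helpers above: compute the stretching limits
# once on a reference image, then apply the same limits via normalize() so a
# series of images shares one display window. The random image is a stand-in.
def _example_contrast_stretching():
    reference = np.random.rand(256, 256)
    v_min, v_max = getContrastStretchingLimits(reference, saturated_pixel=0.004)
    return normalize(reference, v_min, v_max, max_int=255.0)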
def imsave(img, filepath, normalize=True):
""" Save an image. """
path = os.path.dirname(filepath) or '.'
if not os.path.exists(path):
os.makedirs(path)
if img.dtype == np.complex64 or img.dtype == np.complex128:
print('img is complex! Take absolute value.')
img = np.abs(img)
if normalize:
img = _normalize(img)
img *= 255.0
    # scipy.misc.imsave has been removed from SciPy; write via Pillow instead
    # (assumed to be available) so that this function actually saves the image.
    from PIL import Image
    Image.fromarray(img.astype(np.uint8)).save(filepath)
``` |
{
"source": "1lomeno3/sap-hana",
"score": 2
} |
#### File: python/downloader/SAP_Scenarios.py
```python
class Package(object):
selector_newest = 'max(range(len(results)), key=lambda index: results[index]["ReleaseDate"])'
selector_oldest = 'min(range(len(results)), key=lambda index: results[index]["ReleaseDate"])'
dir_db = "DB"
dir_app = "APP"
dir_rti = "RTI"
os_linux = "LINUX_X64"
os_windows = "NT_X64"
os_indep = "OSINDEP"
def __init__(self, name=None, target_dir=None, retr_params=None, condition=None, filter=None, os_avail=None, os_var=None, selector=None):
self.name = name if name else ""
self.target_dir = target_dir if target_dir else ""
self.retr_params = retr_params if retr_params else ""
self.condition = condition if condition else []
self.filter = filter if filter else []
self.os_avail = os_avail if os_avail else []
self.os_var = os_var if os_var else ""
self.selector = selector if selector else ""
class Scenario(object):
def __init__(self, name=None, required_params=None, packages=None):
self.name = name
self.required_params = required_params if required_params else []
self.packages = packages if packages else []
class DBScenario(Scenario):
def __init__(self, **kwargs):
        super(DBScenario, self).__init__(**kwargs)
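# --- Illustrative sketch (added; not part of the original module) ---
# The selector/condition/filter attributes above are stored as Python
# expression strings; presumably the downloader eval()s them against the
# `results` list returned by the SAP search API. The mock results below only
# show what selector_newest computes.
def _example_selector_newest():
    results = [{"ReleaseDate": "2019-01-01", "Description": "old revision"},
               {"ReleaseDate": "2021-06-15", "Description": "new revision"}]
    newest_index = eval(Package.selector_newest, {"results": results})
    return results[newest_index]["Description"]   # -> "new revision"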
S4 = Scenario(
required_params = [],
packages = [
],
)
RTI = Scenario(
required_params = [],
packages = [
Package(
name = "SAPCAR",
target_dir = Package.dir_rti,
retr_params = {"ENR": "67838200100200019185"},
os_avail = [Package.os_linux, Package.os_windows],
os_var = 'rti.os_type',
selector = Package.selector_newest,
),
],
)
HDB = Scenario(
required_params = [
"product_version",
],
packages = [
# _ _ _ _ __ ___
# | | | | /\ | \ | | /\ /_ | / _ \
# | |__| | / \ | \| | / \ | || | | |
# | __ | / /\ \ | . ` | / /\ \ | || | | |
# | | | |/ ____ \| |\ |/ ____ \ | || |_| |
# |_| |_/_/ \_\_| \_/_/ \_\ |_(_)___/
#
#############################################################
# HANA Platform 1.0
Package(
name = "IMDB_PLATFORM100",
target_dir = Package.dir_db,
retr_params = {"ENR": "01200314690900003484", "SWTYPSC": "N", "PECCLSC": "NONE", "V": "INST", "TA": "ACTUAL"},
condition = ['db.product_version == "1.0"', '"PLATFORM" in db.components'],
filter = ['"1.0" in r["Description"]', '"SAP HANA Platf" in r["Description"]','"ZIP" in r["Infotype"]'],
os_avail = [Package.os_indep],
os_var = 'Package.os_indep',
selector = Package.selector_newest,
),
# HANA Database 1.0 (Linux only)
Package(
name = "IMDB_SERVER100",
target_dir = Package.dir_db,
retr_params = {"ENR": "01200615320200017790"},
condition = ['db.product_version == "1.0"', '"DATABASE" in db.components'],
filter = ['"Maintenance Revision" in r["Description"]'],
os_avail = [Package.os_linux],
os_var = 'Package.os_linux',
selector = Package.selector_newest,
),
#############################################################
# HANA Client for HANA 1.0 (Windows/Linux)
Package(
name = "IMDB_CLIENT100",
target_dir = Package.dir_app,
retr_params = {"ENR": "01200615320200017866"},
condition = ['db.product_version == "1.0"', '"CLIENT" in db.components'],
filter = None,
os_avail = [Package.os_linux, Package.os_windows],
os_var = 'app.os_type',
selector = Package.selector_newest,
),
#############################################################
# HANA Studio for HANA 1.0 (Windows/Linux)
Package(
name = "IMC_STUDIO1",
target_dir = Package.dir_db,
retr_params = {"ENR": "73554900100200000585"},
condition = ['db.product_version == "1.0"', '"STUDIO" in db.components'],
filter = ['"Revision 1" in r["Description"]'],
os_avail = [Package.os_linux, Package.os_windows],
os_var = 'Config.bastion_os',
selector = Package.selector_newest,
),
# _ _ _ _ ___ ___
# | | | | /\ | \ | | /\ |__ \ / _ \
# | |__| | / \ | \| | / \ ) || | | |
# | __ | / /\ \ | . ` | / /\ \ / / | | | |
# | | | |/ ____ \| |\ |/ ____ \ / /_ | |_| |
# |_| |_/_/ \_\_| \_/_/ \_\ |____(_)___/
#
#############################################################
# HANA Platform 2.0
Package(
name = "IMDB_PLATFORM200",
target_dir = Package.dir_db,
retr_params = {"ENR": "73554900100900001301", "SWTYPSC": "N", "PECCLSC": "NONE", "V": "INST", "TA": "ACTUAL"},
condition = ['db.product_version == "2.0"', '"PLATFORM" in db.components'],
filter = ['"2.0" in r["Description"]', '"86_64" in r["Description"]','"ZIP" in r["Infotype"]'],
os_avail = [Package.os_indep],
os_var = 'Package.os_indep',
selector = Package.selector_newest,
),
# HANA Database 2.0 (Linux only)
Package(
name = "IMDB_SERVER200",
target_dir = Package.dir_db,
retr_params = {"ENR": "73554900100200005327"},
condition = ['db.product_version == "2.0"', '"DATABASE" in db.components'],
filter = ['"Revision" in r["Description"]'],
os_avail = [Package.os_linux],
os_var = 'Package.os_linux',
selector = Package.selector_newest,
),
#############################################################
# HANA Client for HANA 2.0 (Windows/Linux)
Package(
name = "IMDB_CLIENT20-NT_X64",
target_dir = Package.dir_app,
retr_params = {"ENR": "73554900100200005390"},
condition = ['db.product_version == "2.0"', '"CLIENT" in db.components'],
filter = None,
os_avail = [Package.os_linux, Package.os_windows],
os_var = 'app.os_type',
selector = Package.selector_newest,
),
#############################################################
# HANA Studio for HANA 2.0 (Windows/Linux)
Package(
name = "IMC_STUDIO2-NT_X64",
target_dir = Package.dir_db,
retr_params = {"ENR": "73554900100200000585"},
condition = ['db.product_version == "2.0"', '"STUDIO" in db.components'],
filter = ['"Revision 2" in r["Description"]'],
os_avail = [Package.os_linux, Package.os_windows],
os_var = 'Config.bastion_os',
selector = Package.selector_newest,
),
# __ __ _____
# \ \ / // ____| /\
# \ V /| (___ / \
# > < \___ \ / /\ \
# / . \ ____) / ____ \
# /_/ \_\_____/_/ \_\
#
#############################################################
# XS Advanced Runtime (SAP Extended App Services)
Package(
name = "EXTAPPSER00P",
target_dir = Package.dir_db,
retr_params = {"ENR": "73555000100200004274"},
condition = ['"XSA" in db.components'],
filter = None,
os_avail = [Package.os_linux],
os_var = 'Package.os_linux',
selector = Package.selector_newest,
),
# DI Core
Package(
name = "XSACDEVXDI",
target_dir = Package.dir_db,
retr_params = {
"ENR" : "73554900100200003056",
"PECCLSC" : "PLTFRM",
"INCL_PECCLSC2" : "DB",
"PECGRSC2" : "HDB"
},
condition = ['"XSA" in db.components'],
filter = None,
os_avail = [Package.os_indep],
os_var = 'Package.os_indep',
selector = Package.selector_newest,
),
# SAPUI5 FESV4
Package(
name = "XSACUI5FESV4",
target_dir = Package.dir_db,
retr_params = {"ENR": "73554900100200006811"},
condition = ['"XSA" in db.components'],
filter = None,
os_avail = [Package.os_indep],
os_var = 'Package.os_indep',
selector = Package.selector_newest,
),
]
)
avail_apps = {"S4": S4}
avail_dbs = {"HANA": HDB}
avail_rtis = {"RTI": RTI}
```
#### File: python/downloader/SAP_SMP.py
```python
import json
import urllib.error
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup
from helper import *
class SMP:
url_launchpad = "https://launchpad.support.sap.com"
url_auth = "https://authn.hana.ondemand.com/saml2/sp/mds"
url_sso = "https://accounts.sap.com/saml2/idp/sso"
url_portal = "https://authn.hana.ondemand.com/saml2/sp/acs/supportportal/supportportal"
url_search = "https://launchpad.support.sap.com/services/odata/svt/swdcuisrv/SearchResultSet"
url_retrieve = "https://launchpad.support.sap.com/services/odata/svt/swdcuisrv/DownloadItemSet"
params_maint = {
"SWTYPSC" : "SPP",
"V" : "MAINT"
}
sess = None
@staticmethod
def process_page(url, desc=None, required_inputs=[], referer=None, extra_headers=None, post_data=None, body_prefix=None, allow_redirects=True):
headers = SMP.sess.headers
if referer:
headers["Referer"] = referer
if extra_headers:
for field in list(extra_headers.keys()):
headers[field] = extra_headers[field]
if post_data:
headers["Content-Type"] = "application/x-www-form-urlencoded"
if not post_data:
resp = SMP.sess.get(
url,
headers = headers,
allow_redirects = allow_redirects,
)
else:
resp = SMP.sess.post(
url,
headers = headers,
data = post_data,
allow_redirects = allow_redirects,
)
assert(resp.status_code < 400), \
"Unable to %s (%s); status = %d" % (desc, url, resp.status_code)
soup = BeautifulSoup(resp.content, 'html.parser')
inputs = {}
for i in soup.find_all("input"):
field, value = i.get("name"), i.get("value")
inputs[field] = value
assert(all(i in inputs for i in required_inputs)), \
"%s inputs (%s) does not contain required fields (%s)" % (desc, inputs, required_inputs)
next_body = body_prefix + "&" if body_prefix else ""
for cnt in range(len(required_inputs)):
if cnt > 0:
next_body += "&"
i = required_inputs[cnt]
next_body += "%s=%s" % (i, urllib.parse.quote(inputs[i]))
return(resp.cookies, inputs, next_body)
@staticmethod
def init():
SMP.sess = HTTPSession(
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.109 Safari/537.36",
},
)
# --------------------------------------------------------
# STEP 1 - Open SMP and obtain SAML envelope
# --------------------------------------------------------
(cookies_launchpad, inputs_launchpad, body_auth) = SMP.process_page(
url = SMP.url_launchpad,
desc = "open SMP",
required_inputs = [
"tenantId",
"idpName",
"requestUrl",
"requestId",
"relayState",
"signature",
],
body_prefix = "action=sso",
)
# --------------------------------------------------------
# STEP 2 - Connect to IDP and send SAML envelope
# --------------------------------------------------------
(cookies_auth, inputs_auth, body_sso) = SMP.process_page(
url = SMP.url_auth,
desc = "connect to IDP",
required_inputs = [
"SAMLRequest",
"RelayState",
],
referer = SMP.url_launchpad,
post_data = body_auth,
)
# --------------------------------------------------------
# STEP 3 - Prepare SAML request and send credentials
# --------------------------------------------------------
(cookies_sso, inputs_sso, body_sso) = SMP.process_page(
url = SMP.url_sso,
desc = "prepare SAML request",
required_inputs = [
"authenticity_token",
"xsrfProtection",
"idpSSOEndpoint",
"spId",
"spName",
],
referer = SMP.url_auth,
post_data = body_sso,
body_prefix = "utf8=%E2%9C%93&" + urllib.parse.urlencode({
"targetUrl" : "",
"sourceUrl" : "",
"org" : "",
"mobileSSOToken" : "",
"tfaToken" : "",
"css" : "",
"SAMLRequest" : inputs_auth["SAMLRequest"],
"RelayState" : inputs_auth["RelayState"],
"j_username" : Config.credentials.sap_user,
"j_password" : Config.credentials.sap_password,
})
)
# --------------------------------------------------------
# STEP 4 - Obtain SAML response
# --------------------------------------------------------
(cookies_sso, inputs_sso, body_portal) = SMP.process_page(
url = SMP.url_sso,
desc = "obtain SAML response",
required_inputs = [
"SAMLResponse",
],
referer = SMP.url_sso,
post_data = body_sso,
body_prefix = "utf8=%E2%9C%93&" + urllib.parse.urlencode({
"RelayState": inputs_auth["RelayState"],
"authenticity_token": inputs_sso["authenticity_token"],
})
)
# --------------------------------------------------------
# STEP 5: Obtain JSESSION
# --------------------------------------------------------
(cookies_portal, inputs_portal, body_search) = SMP.process_page(
url = SMP.url_launchpad,
desc = "obtain session",
referer = SMP.url_portal,
post_data = body_portal,
allow_redirects = False,
)
@staticmethod
def search(query):
SMP.sess.headers["Accept"] = "application/json"
payload = {
"SEARCH_MAX_RESULT" : "500",
"RESULT_PER_PAGE" : "500",
"SEARCH_STRING" : query,
}
resp = SMP.sess.get(
SMP.url_search,
params=payload
)
assert(resp.status_code == 200), \
"Unable to retrieve search results; status = %d" % (resp.status_code)
j = json.loads(resp.content.decode("utf-8"))
assert("d" in j and "results" in j["d"]), \
"Invalid search result format"
results = j["d"]["results"]
return results
@staticmethod
def retrieve(params, os_filter):
SMP.sess.headers["Accept"] = "application/json"
print("retrieving %s for %s..." % (params, os_filter))
payload = {
"_EVENT" : "LIST",
"EVENT" : "LIST",
"PECCLSC" : "OS",
"INCL_PECCLSC1" : "OS",
"PECGRSC1" : os_filter,
}
# Setting this to SUPPORT PACKAGES & PATCHES for now;
# may need to update logic for future scenarios
payload.update(SMP.params_maint)
payload.update(params)
resp = SMP.sess.get(
SMP.url_retrieve,
params=payload,
)
assert(resp.status_code == 200), \
"Unable to retrieve search results; status = %d" % (resp.status_code)
j = json.loads(resp.content.decode("utf-8"))
assert("d" in j and "results" in j["d"]), \
"Invalid search result format"
results = j["d"]["results"]
return results
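# --- Illustrative usage sketch (added; not part of the original module) ---
# Presumed call sequence of the downloader: authenticate once via SMP.init(),
# then search and retrieve package listings. Config.credentials (from helper)
# must already hold a valid SAP S-user; the ENR below is the SAPCAR entry
# used in SAP_Scenarios.py.
def _example_smp_usage():
    SMP.init()
    hits = SMP.search("SAPCAR")
    items = SMP.retrieve({"ENR": "67838200100200019185"}, "LINUX_X64")
    return len(hits), len(items)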
``` |
{
"source": "1LooperTrooper/Where2",
"score": 2
} |
#### File: where2_core/itinerary/views.py
```python
from django.shortcuts import render, redirect
from django.http import Http404
from django.views.generic import TemplateView
from . import weather_itinerary, data_format, get_weather
import threading
itinerary_current = weather_itinerary.Itinerary('current')
itinerary_full = weather_itinerary.Itinerary('full')
def check_weather():
#Checks the weather once per hour
get_weather.write_weather_data()
threading.Timer(3600, check_weather).start()
class HomeView(TemplateView):
#Homepage view
check_weather()
template_name = 'itinerary/main.html'
def itinerary_current_view(request):
#Weather Itinerary View
weather = get_weather.get_weather_data()
itinerary_current.getItinerary()
if weather == True:
weather_statement = 'Today is a nice Day'
else:
weather_statement = 'Today is a bad Day'
context = {
'Itinerary': itinerary_current.itinerary,
'Weather': weather_statement,
}
return render(request, 'itinerary/itinerary_page.html', context=context)
def itinerary_full_view(request):
#Weather Itinerary View
weather = get_weather.get_weather_data()
itinerary_full.getItinerary()
if weather == True:
weather_statement = 'Today is a nice Day'
else:
weather_statement = 'Today is a bad Day'
context = {
'Itinerary': itinerary_full.itinerary,
'Weather': weather_statement,
}
return render(request, 'itinerary/itinerary_page.html', context=context)
def activity_view(request):
#Activities page
resturants, activities = data_format.format_data()
weather = get_weather.get_weather_data()
if weather == True:
activities = activities[0]['Sunny']
else:
activities = activities[1]['Raining']
context = {
'Activities' : activities
}
return render(request, 'itinerary/activities.html', context=context)
def resturant_view(request):
    #Restaurants page
resturants, activities = data_format.format_data()
breakfast_resturants = resturants[0]['Breakfast']
lunch_resturants = resturants[1]['Lunch']
dinner_resturants = resturants[2]['Dinner']
context = {
'Resturant': resturants,
'Breakfast_Resturants' : breakfast_resturants,
'Lunch_Resturants' : lunch_resturants,
'Dinner_Resturants' : dinner_resturants
}
return render(request, 'itinerary/resturants.html', context=context)
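# Illustrative URLconf sketch (added; route names are assumptions and the
# project's real urls.py is not shown here):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.HomeView.as_view(), name='home'),
#         path('itinerary/current/', views.itinerary_current_view, name='itinerary-current'),
#         path('itinerary/full/', views.itinerary_full_view, name='itinerary-full'),
#         path('activities/', views.activity_view, name='activities'),
#         path('restaurants/', views.resturant_view, name='restaurants'),
#     ]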
``` |
{
"source": "1Lorde/dataleak_detector",
"score": 3
} |
#### File: dataleak_detector/agent/api.py
```python
import os
import time
from hashlib import sha256
import requests
from config import management_host, management_port, local_db_filename, update_timeout
check_url = "http://{}:{}/devices/check".format(management_host, management_port)
allowed_url = "http://{}:{}/devices/allowed".format(management_host, management_port)
allowed_hash_url = "http://{}:{}/devices/allowed/hash".format(management_host, management_port)
def is_allowed_latest():
try:
if not os.path.exists(local_db_filename):
return False
response = requests.get(allowed_hash_url).text
with open(local_db_filename, 'r') as f:
hashes = f.read()
h = sha256()
h.update(hashes.encode('utf-8'))
if response == h.hexdigest():
print('[Auto updating task] Local database of allowed devices are latest')
return True
else:
print('[Auto updating task] Local database of allowed devices are outdated')
return False
except Exception as e:
print('[Auto updating task] Can`t get hash of allowed devices database, cause ' + str(type(e).__name__))
def update_allowed_drives():
while True:
if not is_allowed_latest():
try:
response = requests.get(allowed_url).text
with open(local_db_filename, 'w') as f:
f.writelines(response)
print('[Auto updating task] List of allowed devices successfully updated')
except Exception as e:
print('[Auto updating task] List of allowed devices not updated, cause ' + str(type(e).__name__))
time.sleep(update_timeout)
def offline_check(serial):
if not os.path.exists(local_db_filename):
print('Local database is empty. Drive {} are blocked'.format(serial))
return False
h = sha256()
h.update(serial.encode('utf-8'))
with open(local_db_filename, 'r') as f:
hashes = f.readline().split(';')
if not h.hexdigest() in hashes:
print('Drive {} not allowed'.format(serial))
return False
else:
return True
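# --- Illustrative usage sketch (added; not part of the original module) ---
# Build a one-entry local database for a made-up serial number and check it
# with offline_check(). The file format (hex digests joined by ';') mirrors
# what offline_check() reads above.
def _example_offline_check():
    serial = "FAKE-SERIAL-0001"
    digest = sha256(serial.encode('utf-8')).hexdigest()
    with open(local_db_filename, 'w') as f:
        f.write(digest)
    return offline_check(serial)   # expected: True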
``` |
{
"source": "1Lorde/orders-tracker",
"score": 2
} |
#### File: orders_tracker/api/api.py
```python
import json
from flask import Blueprint, Response, request
from orders_tracker.models import Client, Device, OrderStatus, Staff
api_blueprint = Blueprint('api_bp', __name__)
@api_blueprint.route('/api/autocomplete/clients', methods=['GET'])
def client_autocomplete():
clients = [obj[0] for obj in Client.query.with_entities(Client.name).all()]
return Response(json.dumps(clients), mimetype='application/json')
@api_blueprint.route('/api/autocomplete/devices', methods=['GET'])
def serial_autocomplete():
client_name = request.args.get('client_name')
client_id = Client.query.filter_by(name=client_name).with_entities(Client.id).first()
if client_id is not None:
serials = [obj[0] for obj in Device.query.filter_by(client_id=client_id[0]).with_entities(Device.serial).all()]
return Response(json.dumps(serials), mimetype='application/json')
return Response(status=404)
@api_blueprint.route('/api/autocomplete/staff', methods=['GET'])
def staff_autocomplete():
staff = [obj[0] for obj in Staff.query.with_entities(Staff.name).all()]
return Response(json.dumps(staff), mimetype='application/json')
@api_blueprint.route('/api/order_status/<status_id>', methods=['GET'])
def order_status(status_id):
status_name = OrderStatus.query.filter_by(id=status_id).with_entities(OrderStatus.name).first()[0]
return status_name
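# --- Illustrative sketch (added; not part of the original module) ---
# How this blueprint might be mounted on an application. The real project's
# app factory and SQLAlchemy setup are not shown here, so this is only a
# wiring example; the endpoints still need an initialised database to work.
def _example_create_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(api_blueprint)
    return app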
``` |
{
"source": "1m0r74l17y/random-files",
"score": 3
} |
#### File: python/euler/20.py
```python
import math
def fuck(me):
stuff = 1
for i in range(me):
stuff *= (i+1)
return stuff
def sumfuck(trees):
sum = 0
for number in range(len(str(trees))):
add = str(trees)
sum += int(add[number])
return sum
test = input("number")
print sumfuck(fuck(test))
```
#### File: python/euler/7test.py
```python
option = input("Prime number?")
def isPrime(option):
if option % 2 == 0: return False
p = 3
while p < option**0.5+1:
if option % p == 0: return False
p += 2
return True
print(isPrime(option))
``` |
{
"source": "1m38/OnlineJudgeHelper",
"score": 3
} |
#### File: 1m38/OnlineJudgeHelper/oj_sites.py
```python
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import os
import sys
import codecs
import re
import collections
import requests
import parse
from bs4 import BeautifulSoup
import warnings
def detect_site(url):
re_list = collections.OrderedDict()
re_list["AtCoderBeta"] = re.compile(r"https?://atcoder\.jp")
re_list["AtCoder"] = re.compile(r"https?://.*\.atcoder\.jp")
re_list["Codeforces"] = re.compile(r"http://codeforces\.com")
re_list["Yukicoder"] = re.compile(r"http://yukicoder\.me")
for sitename, pattern in re_list.items():
if pattern.match(url):
return sitename
class ContestSite(object):
url_format = None
login_url = None
site_name = None
def __init__(self, url, config):
if self.site_name is None:
raise NotImplementedError
self.url = url
self.contest, self.pnumber = self.parse_url()
if self.contest is None:
self.contest = self.site_name
self.s = requests.Session()
if self.login_url is not None:
self._login(config)
self.page = self.get()
try:
self.testcases = self.parse_page(self.page)
except:
warnings.warn(
"Failed to load testcases, so create a blank sample file.",
RuntimeWarning)
self.testcases = []
def result(self):
return self.contest, self.pnumber, self.testcases
def parse_url(self):
if self.url_format is None:
raise NotImplementedError
r = parse.parse(self.url_format, self.url)
contest = r["contest"] if "contest" in r.named.keys() else None
pnumber = r["pnumber"] if "pnumber" in r.named.keys() else None
return contest, pnumber
def _login(self, config):
if self.site_name not in config["sites"]:
warnings.warn(
"User credential for contest site {} not in config.json".format(self.site_name),
RuntimeWarning)
return
else:
self.username = config["sites"][self.site_name]["username"]
self.password = config["sites"][self.site_name]["password"]
try:
login_status = self.login()
if not login_status:
raise RuntimeWarning("Failed to login")
except:
warnings.warn("Failed to login", RuntimeWarning)
def login(self):
raise NotImplementedError
def get(self):
r = self.s.get(self.url)
return r.text
def parse_page(self, page):
"""parse task page"""
"""return: testcases [[input_str, output_str], ...]"""
raise NotImplementedError
class AtCoderBeta(ContestSite):
url_format = "https://atcoder.jp/contests/{contest}/tasks/{pnumber}"
login_url = "https://atcoder.jp/login"
site_name = "AtCoder"
def login(self):
        payload = {"username": self.username,
                   "password": self.password}
url = self.login_url
# get csrf-token
r = self.s.get(url)
soup = BeautifulSoup(r.text, "html.parser")
rform = soup.find_all("form")[-1]
tokens = rform.find_all("input", type="hidden")
for token in tokens:
payload[token.attrs["name"]] = token.attrs["value"]
r = self.s.post(url, payload)
if r.status_code != 200:
warnings.warn("Login request returns status code {}".format(r.status_code),
RuntimeWarning)
elif r.url == "https://atcoder.jp/login":
warnings.warn("Failed to login. Is username or password incorrect?".format(r.status_code),
RuntimeWarning)
else:
return True
return False
def parse_page(self, page):
testcases = []
soup = BeautifulSoup(page, "html.parser")
task = soup.find("div", {"id": "task-statement"})
task_ja = task.find("span", {"class": "lang-ja"})
if not task_ja:
task_ja = task
sections = task_ja.find_all("section")
        # Pick the <section> elements whose <h3> heading contains "Sample Input" / "入力例"
        # (and the corresponding "Sample Output" / "出力例" sections below).
        s_sample_inputs = \
            [s for s in sections if s.h3 and ("Sample Input" in s.h3.text or "入力例" in s.h3.text)]
        s_sample_outputs = \
            [s for s in sections if s.h3 and ("Sample Output" in s.h3.text or "出力例" in s.h3.text)]
for s_in, s_out in zip(s_sample_inputs, s_sample_outputs):
if not (s_in.find("pre") and s_out.find("pre")):
continue
input_str = s_in.pre.text.replace("\r\n", "\n").strip() + "\n"
output_str = s_out.pre.text.replace("\r\n", "\n").strip() + "\n"
testcases.append([input_str, output_str])
return testcases
class AtCoder(ContestSite):
url_format = "https://{contest}.contest.atcoder.jp/tasks/{pnumber}"
login_url = "https://{contest}.contest.atcoder.jp/login"
site_name = "AtCoder"
def login(self):
        payload = {"name": self.username,
                   "password": self.password}
url = self.login_url.format(contest=self.contest)
r = self.s.post(url, payload)
if r.status_code != 200:
warnings.warn("Login request returns status code {}".format(r.status_code),
RuntimeWarning)
else:
if "X-ImoJudge-SimpleAuth" in r.headers and \
r.headers["X-ImoJudge-SimpleAuth"] == "Passed":
return True
return False
def parse_page(self, page):
testcases = []
soup = BeautifulSoup(page, "html.parser")
task = soup.find("div", {"id": "task-statement"})
task_ja = task.find("span", {"class": "lang-ja"})
if not task_ja:
task_ja = task
pres = task_ja.findAll("pre")
n_pres = len(pres)
for i in range(1, n_pres, 2):
input_str = pres[i].text.replace("\r\n", "\n").strip() + "\n"
output_str = pres[i+1].text.replace("\r\n", "\n").strip() + "\n"
testcases.append([input_str, output_str])
return testcases
class Codeforces(ContestSite):
url_format = "http://codeforces.com/contest/{contest}/problem/{pnumber}"
# login_url = "http://codeforces.com/enter"
login_url = None
site_name = "Codeforces"
def parse_page(self, page):
testcases = []
soup = BeautifulSoup(page, "html.parser")
sample_test = soup.select("div.sample-test")[0]
inputs = sample_test.find_all("div", class_="input")
outputs = sample_test.find_all("div", class_="output")
for i, o in zip(inputs, outputs):
pre_i = i.find("pre")
pre_o = o.find("pre")
for br in pre_i.find_all("br"):
br.replace_with("\n")
for br in pre_o.find_all("br"):
br.replace_with("\n")
str_i = pre_i.text
str_o = pre_o.text
testcases.append([str_i, str_o])
return testcases
class Yukicoder(ContestSite):
url_format = "http://yukicoder.me/problems/no/{pnumber}"
login_url = None
site_name = "yukicoder"
def parse_page(self, page):
testcases = []
soup = BeautifulSoup(page, "html.parser")
samples = soup.find_all("div", class_="sample")
for sample in samples:
pres = sample.find_all("pre")
pre_i = pres[0]
pre_o = pres[1]
str_i = pre_i.text + "\n"
str_o = pre_o.text + "\n"
testcases.append([str_i, str_o])
return testcases
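# --- Illustrative usage sketch (added; not part of the original module) ---
# Resolve a task URL to its site class and fetch the sample test cases. The
# URL is a placeholder, the empty config skips login with a warning, and the
# call performs real HTTP requests when run.
def _example_fetch_testcases(url="https://atcoder.jp/contests/abc100/tasks/abc100_a"):
    site_classes = {"AtCoderBeta": AtCoderBeta, "AtCoder": AtCoder,
                    "Codeforces": Codeforces, "Yukicoder": Yukicoder}
    site_cls = site_classes[detect_site(url)]
    contest, pnumber, testcases = site_cls(url, {"sites": {}}).result()
    return contest, pnumber, len(testcases)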
``` |
{
"source": "1mackenziekyle/cover-letters",
"score": 3
} |
#### File: 1mackenziekyle/cover-letters/task.py
```python
from RPA.Browser.Selenium import Selenium
import time
import pandas as pd
from bs4 import BeautifulSoup
import json
lib = Selenium()
# Global vars
fullStackJson = {}
frontendJson = {}
backendJson = {}
with open('input/userinfo.json', 'r') as f:
userJsonInfo = json.load(f)
# Function: Open and navigate to SCOPE
def navigate_to_browser(ubc_user, ubc_pw) -> None:
# OPEN BROWSER
lib.open_available_browser("https://scope.sciencecoop.ubc.ca/students/cwl-current-student-login.htm")
lib.set_browser_implicit_wait(10)
# SCOPE LOGIN
login_button = lib.click_link("xpath:/html/body/div/main/div/div/a")
lib.wait_until_element_is_visible('xpath:/html/body/div[1]/div/div/div/div/h1')
# CWL LOGIN
username_input_field = "xpath://*[@id='username']"
password_input_field = "xpath://*[@id='password']"
pause(0.1)
lib.input_text_when_element_is_visible(username_input_field, ubc_user)
pause(0.1)
lib.input_text_when_element_is_visible(password_input_field, ubc_pw)
lib.click_button('xpath://*[@id="col2"]/form/div[3]/button')
# SCOPE HOMEPAGE
pause(0.5)
lib.click_button_when_visible('xpath:/html/body/div[2]/header/div[4]/div/div/button')
pause(1)
lib.click_button_when_visible('xpath:/html/body/div[2]/header/div[3]/div[1]/nav/ul/li[2]/button')
pause(1)
lib.click_element_when_visible('xpath://*[@id="myAccountNav"]/nav/ul/li[2]/ul/li[2]/a')
pause(3)
lib.click_element_when_visible('xpath://*[@id="quickSearchCountsContainer"]/table[2]/tbody/tr[1]/td[2]/a')
pause(1)
# Function: Pause
def pause(secs) -> None:
time.sleep(secs)
def writeDataToDict(index) -> None:
# write json object
job_data = {
'job_title': table['Job Title'].iloc[index],
'organization' : table['Organization'].iloc[index],
'location': table['Location'].iloc[index],
'app_deadline': table['App Deadline'].iloc[index]
}
# Frontend?
for word in ['web', 'front', 'app', 'ui', 'ux']:
if word in job_data['job_title'].lower():
job_data['type'] = 'front-end'
frontendJson[job_data['job_title'] + '-' + job_data['organization']] = job_data
return
# Backend?
for word in ['data', 'science', 'back', 'machine', 'ops', 'firmware', 'embedded', 'systems']:
if word in job_data['job_title'].lower():
job_data['type'] = 'back-end'
backendJson[job_data['job_title'] + '-' + job_data['organization']] = job_data
return
# Fullstack?
job_data['type'] = 'full-stack'
fullStackJson[job_data['job_title'] + '-' + job_data['organization']] = job_data
return
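# --- Illustrative sketch (added; not part of the original script) ---
# The keyword-based classification above can be exercised without Selenium by
# providing a DataFrame in the same column layout SCOPE's posting table uses.
# The rows below are made-up sample data.
def _example_classification():
    global table
    table = pd.DataFrame([
        {"Job Title": "Front End Developer Co-op", "Organization": "Acme",
         "Location": "Vancouver, BC", "App Deadline": "Jan 1, 2023"},
        {"Job Title": "Data Engineering Co-op", "Organization": "Beta Labs",
         "Location": "Remote", "App Deadline": "Jan 2, 2023"},
    ])
    for index in range(len(table)):
        writeDataToDict(index)
    return list(frontendJson), list(backendJson), list(fullStackJson)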
# Main function
def main(ubc_user, ubc_pw) -> None:
try:
# navigate
navigate_to_browser(ubc_user=ubc_user, ubc_pw=ubc_pw)
# get data
soup = BeautifulSoup(lib.driver.page_source, "html.parser")
global table
table = pd.read_html(lib.driver.page_source)[0]
# modify data
lib.close_all_browsers()
table.insert(1, "Job Type", "")
print("Writing JSON...")
# Write JSON to array
for index in range(len(table)):
writeDataToDict(index)
# Write to file
with open('output/json/fullstack.json', 'w') as outfile:
outfile.write(json.dumps(fullStackJson, indent=4))
with open('output/json/frontend.json', 'w') as outfile:
outfile.write(json.dumps(frontendJson, indent=4))
with open('output/json/backend.json', 'w') as outfile:
outfile.write(json.dumps(backendJson, indent=4))
except Exception as e:
print(e)
# Main Method
if __name__ == "__main__":
username = userJsonInfo['ubc_username']
pw = userJsonInfo['ubc_password']
main(username, pw)
``` |
{
"source": "1maginasian/PaddleHub",
"score": 2
} |
#### File: paddlehub/datasets/base_nlp_dataset.py
```python
from typing import Dict, List, Optional, Union, Tuple
import csv
import io
import os
import numpy as np
import paddle
from paddlehub.env import DATA_HOME
from paddlehub.text.bert_tokenizer import BertTokenizer
from paddlehub.text.tokenizer import CustomTokenizer
from paddlehub.utils.log import logger
from paddlehub.utils.utils import download
from paddlehub.utils.xarfile import is_xarfile, unarchive
class InputExample(object):
"""
The input data structure of Transformer modules (BERT, ERNIE and so on).
"""
def __init__(self, guid: int, text_a: str, text_b: Optional[str] = None, label: Optional[str] = None):
"""
The input data structure.
Args:
guid (:obj:`int`):
Unique id for the input data.
text_a (:obj:`str`, `optional`, defaults to :obj:`None`):
The first sequence. For single sequence tasks, only this sequence must be specified.
text_b (:obj:`str`, `optional`, defaults to :obj:`None`):
The second sequence if sentence-pair.
label (:obj:`str`, `optional`, defaults to :obj:`None`):
The label of the example.
Examples:
.. code-block:: python
from paddlehub.datasets.base_nlp_dataset import InputExample
example = InputExample(guid=0,
text_a='15.4寸笔记本的键盘确实爽,基本跟台式机差不多了',
text_b='蛮喜欢数字小键盘,输数字特方便,样子也很美观,做工也相当不错',
label='1')
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def __str__(self):
if self.text_b is None:
return "text={}\tlabel={}".format(self.text_a, self.label)
else:
return "text_a={}\ttext_b={},label={}".format(self.text_a, self.text_b, self.label)
class BaseNLPDataset(object):
"""
    The virtual base class for NLP datasets, such as TextClassificationDataset, SeqLabelingDataset, and so on.
    Subclasses must inherit from this base class and re-implement the method _read_file.
"""
def __init__(self,
base_path: str,
tokenizer: Union[BertTokenizer, CustomTokenizer],
max_seq_len: Optional[int] = 128,
mode: Optional[str] = "train",
data_file: Optional[str] = None,
label_file: Optional[str] = None,
label_list: Optional[List[str]] = None):
"""
        Args:
base_path (:obj:`str`): The directory to the whole dataset.
tokenizer (:obj:`BertTokenizer` or :obj:`CustomTokenizer`):
It tokenizes the text and encodes the data as model needed.
max_seq_len (:obj:`int`, `optional`, defaults to :128):
If set to a number, will limit the total sequence returned so that it has a maximum length.
mode (:obj:`str`, `optional`, defaults to `train`):
It identifies the dataset mode (train, test or dev).
data_file(:obj:`str`, `optional`, defaults to :obj:`None`):
The data file name, which is relative to the base_path.
label_file(:obj:`str`, `optional`, defaults to :obj:`None`):
The label file name, which is relative to the base_path.
It is all labels of the dataset, one line one label.
label_list(:obj:`List[str]`, `optional`, defaults to :obj:`None`):
The list of all labels of the dataset
"""
self.data_file = os.path.join(base_path, data_file)
self.label_list = label_list
self.mode = mode
self.tokenizer = tokenizer
self.max_seq_len = max_seq_len
if label_file:
self.label_file = os.path.join(base_path, label_file)
if not self.label_list:
self.label_list = self._load_label_data()
else:
                logger.warning("As label_list has been assigned, label_file has no effect")
if self.label_list:
self.label_map = {item: index for index, item in enumerate(self.label_list)}
def _load_label_data(self):
"""
Loads labels from label file.
"""
if os.path.exists(self.label_file):
with open(self.label_file, "r", encoding="utf8") as f:
return f.read().strip().split("\n")
else:
raise RuntimeError("The file {} is not found.".format(self.label_file))
def _download_and_uncompress_dataset(self, destination: str, url: str):
"""
Downloads dataset and uncompresses it.
Args:
destination (:obj:`str`): The dataset cached directory.
url (:obj: str): The link to be downloaded a dataset.
"""
if not os.path.exists(destination):
dataset_package = download(url=url, path=DATA_HOME)
if is_xarfile(dataset_package):
unarchive(dataset_package, DATA_HOME)
else:
logger.info("Dataset {} already cached.".format(destination))
def _read_file(self, input_file: str, is_file_with_header: bool = False):
"""
Reads the files.
Args:
input_file (:obj:str) : The file to be read.
is_file_with_header(:obj:bool, `optional`, default to :obj: False) :
Whether or not the file is with the header introduction.
"""
raise NotImplementedError
def get_labels(self):
"""
Gets all labels.
"""
return self.label_list
class TextClassificationDataset(BaseNLPDataset, paddle.io.Dataset):
"""
    The dataset class suitable for all text classification datasets.
"""
def __init__(self,
base_path: str,
tokenizer: Union[BertTokenizer, CustomTokenizer],
max_seq_len: int = 128,
mode: str = "train",
data_file: str = None,
label_file: str = None,
label_list: list = None,
is_file_with_header: bool = False):
"""
        Args:
base_path (:obj:`str`): The directory to the whole dataset.
tokenizer (:obj:`BertTokenizer` or :obj:`CustomTokenizer`):
It tokenizes the text and encodes the data as model needed.
max_seq_len (:obj:`int`, `optional`, defaults to :128):
If set to a number, will limit the total sequence returned so that it has a maximum length.
mode (:obj:`str`, `optional`, defaults to `train`):
It identifies the dataset mode (train, test or dev).
data_file(:obj:`str`, `optional`, defaults to :obj:`None`):
The data file name, which is relative to the base_path.
label_file(:obj:`str`, `optional`, defaults to :obj:`None`):
The label file name, which is relative to the base_path.
It is all labels of the dataset, one line one label.
label_list(:obj:`List[str]`, `optional`, defaults to :obj:`None`):
The list of all labels of the dataset
is_file_with_header(:obj:bool, `optional`, default to :obj: False) :
Whether or not the file is with the header introduction.
"""
super(TextClassificationDataset, self).__init__(
base_path=base_path,
tokenizer=tokenizer,
max_seq_len=max_seq_len,
mode=mode,
data_file=data_file,
label_file=label_file,
label_list=label_list)
self.examples = self._read_file(self.data_file, is_file_with_header)
self.records = self._convert_examples_to_records(self.examples)
def _read_file(self, input_file, is_file_with_header: bool = False) -> List[InputExample]:
"""
Reads a tab separated value file.
Args:
input_file (:obj:str) : The file to be read.
is_file_with_header(:obj:bool, `optional`, default to :obj: False) :
Whether or not the file is with the header introduction.
Returns:
examples (:obj:`List[InputExample]`): All the input data.
"""
if not os.path.exists(input_file):
raise RuntimeError("The file {} is not found.".format(input_file))
else:
with io.open(input_file, "r", encoding="UTF-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
examples = []
seq_id = 0
header = next(reader) if is_file_with_header else None
for line in reader:
example = InputExample(guid=seq_id, label=line[0], text_a=line[1])
seq_id += 1
examples.append(example)
return examples
def _convert_examples_to_records(self, examples: List[InputExample]) -> List[dict]:
"""
Converts all examples to records which the model needs.
Args:
examples(obj:`List[InputExample]`): All data examples returned by _read_file.
Returns:
records(:obj:`List[dict]`): All records which the model needs.
"""
records = []
for example in examples:
record = self.tokenizer.encode(text=example.text_a, text_pair=example.text_b, max_seq_len=self.max_seq_len)
# CustomTokenizer will tokenize the text firstly and then lookup words in the vocab
# When all words are not found in the vocab, the text will be dropped.
if not record:
logger.info(
"The text %s has been dropped as it has no words in the vocab after tokenization." % example.text_a)
continue
if example.label:
record['label'] = self.label_map[example.label]
records.append(record)
return records
def __getitem__(self, idx):
record = self.records[idx]
if 'label' in record.keys():
return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['label'], dtype=np.int64)
else:
return np.array(record['input_ids']), np.array(record['segment_ids'])
def __len__(self):
return len(self.records)
class SeqLabelingDataset(BaseNLPDataset, paddle.io.Dataset):
def __init__(self,
base_path: str,
tokenizer: Union[BertTokenizer, CustomTokenizer],
max_seq_len: int = 128,
mode: str = "train",
data_file: str = None,
label_file: str = None,
label_list: list = None,
split_char: str ="\002",
no_entity_label: str = "O",
ignore_label: int = -100,
is_file_with_header: bool = False):
super(SeqLabelingDataset, self).__init__(
base_path=base_path,
tokenizer=tokenizer,
max_seq_len=max_seq_len,
mode=mode,
data_file=data_file,
label_file=label_file,
label_list=label_list)
self.no_entity_label = no_entity_label
self.split_char = split_char
self.ignore_label = ignore_label
self.examples = self._read_file(self.data_file, is_file_with_header)
self.records = self._convert_examples_to_records(self.examples)
def _read_file(self, input_file, is_file_with_header: bool = False) -> List[InputExample]:
"""Reads a tab separated value file."""
if not os.path.exists(input_file):
raise RuntimeError("The file {} is not found.".format(input_file))
else:
with io.open(input_file, "r", encoding="UTF-8") as f:
reader = csv.reader(f, delimiter="\t", quotechar=None)
examples = []
seq_id = 0
header = next(reader) if is_file_with_header else None
for line in reader:
example = InputExample(guid=seq_id, label=line[1], text_a=line[0])
seq_id += 1
examples.append(example)
return examples
def _convert_examples_to_records(self, examples: List[InputExample]) -> List[dict]:
"""
Returns a list[dict] including all the input information what the model need.
Args:
examples (list): the data examples, returned by _read_file.
Returns:
a list with all the examples record.
"""
records = []
for example in examples:
tokens, labels = self._reseg_token_label(
tokens=example.text_a.split(self.split_char),
labels=example.label.split(self.split_char))
record = self.tokenizer.encode(
text=tokens, max_seq_len=self.max_seq_len)
# CustomTokenizer will tokenize the text firstly and then lookup words in the vocab
# When all words are not found in the vocab, the text will be dropped.
if not record:
logger.info(
"The text %s has been dropped as it has no words in the vocab after tokenization."
% example.text_a)
continue
if labels:
record["label"] = []
tokens_with_specical_token = self.tokenizer.convert_ids_to_tokens(record['input_ids'])
tokens_index = 0
for token in tokens_with_specical_token:
if tokens_index < len(
tokens) and token == tokens[tokens_index]:
record["label"].append(
self.label_list.index(labels[tokens_index]))
tokens_index += 1
elif token in [self.tokenizer.pad_token]:
record["label"].append(self.ignore_label) # label of special token
else:
record["label"].append(
self.label_list.index(self.no_entity_label))
records.append(record)
return records
    def _reseg_token_label(
            self, tokens: List[str], labels: List[str] = None) -> Tuple[List[str], Optional[List[str]]]:
if labels:
if len(tokens) != len(labels):
raise ValueError(
"The length of tokens must be same with labels")
ret_tokens = []
ret_labels = []
for token, label in zip(tokens, labels):
sub_token = self.tokenizer(token)
if len(sub_token) == 0:
continue
ret_tokens.extend(sub_token)
ret_labels.append(label)
if len(sub_token) < 2:
continue
sub_label = label
if label.startswith("B-"):
sub_label = "I-" + label[2:]
ret_labels.extend([sub_label] * (len(sub_token) - 1))
if len(ret_tokens) != len(ret_labels):
raise ValueError(
"The length of ret_tokens can't match with labels")
return ret_tokens, ret_labels
else:
ret_tokens = []
for token in tokens:
sub_token = self.tokenizer(token)
if len(sub_token) == 0:
continue
ret_tokens.extend(sub_token)
if len(sub_token) < 2:
continue
return ret_tokens, None
def __getitem__(self, idx):
record = self.records[idx]
if 'label' in record.keys():
return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['seq_len']), np.array(record['label'], dtype=np.int64)
else:
return np.array(record['input_ids']), np.array(record['segment_ids']), np.array(record['seq_len'])
def __len__(self):
return len(self.records)
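def _example_text_classification_dataset():
    """
    Illustrative sketch (added; not part of PaddleHub): wires up
    TextClassificationDataset with a tiny on-disk TSV and a minimal stand-in
    tokenizer so the record-building mechanics above can be traced. Real code
    would pass a module tokenizer (BertTokenizer / CustomTokenizer) instead of
    the toy class below.
    """
    import tempfile
    class _ToyTokenizer(object):
        def encode(self, text, text_pair=None, max_seq_len=128):
            ids = [ord(ch) % 1000 for ch in text][:max_seq_len]
            return {'input_ids': ids, 'segment_ids': [0] * len(ids)}
    base_path = tempfile.mkdtemp()
    with io.open(os.path.join(base_path, 'train.tsv'), 'w', encoding='utf8') as f:
        f.write(u'1\tgreat hotel, friendly staff\n0\troom was far too small\n')
    dataset = TextClassificationDataset(
        base_path=base_path, tokenizer=_ToyTokenizer(), max_seq_len=32,
        mode='train', data_file='train.tsv', label_list=['0', '1'])
    return len(dataset), dataset[0]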
``` |
{
"source": "1Mans/test_task",
"score": 2
} |
#### File: test_task/departments/models.py
```python
from django.conf import settings
from django.db import models
class Department(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.name
class Rank(models.Model):
name = models.CharField(max_length=200)
description = models.TextField()
def __str__(self):
return self.name
class Employee(models.Model):
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=200)
middle_name = models.CharField(max_length=200)
HIGH = "Высшее"
MEDIUM = "Средне-специальное"
SCHOOL = "Среднее"
YEAR_IN_SCHOOL_CHOICES = [
(HIGH, "Высшее"),
(MEDIUM, "Средне-специальное"),
(SCHOOL, "Среднее"),
]
year_in_school = models.CharField(
max_length=20,
choices=YEAR_IN_SCHOOL_CHOICES,
)
years_worked = models.IntegerField()
dob = models.DateField()
date_accepted = models.DateField()
department = models.ForeignKey(
Department, on_delete=models.SET_NULL, null=True, blank=False
)
rank = models.ForeignKey(Rank, on_delete=models.SET_NULL, null=True, blank=False)
def __str__(self):
return self.first_name
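def _example_create_employee():
    """
    Illustrative ORM usage sketch (added; sample values are made up). Meant to
    be run from a Django shell or a data migration once the app is migrated.
    """
    dept = Department.objects.create(name="IT", description="Information technology")
    rank = Rank.objects.create(name="Senior", description="Senior specialist")
    return Employee.objects.create(
        first_name="Ivan", last_name="Petrov", middle_name="Ivanovich",
        year_in_school=Employee.HIGH, years_worked=5,
        dob="1990-01-01", date_accepted="2020-01-01",
        department=dept, rank=rank,
    )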
``` |
{
"source": "1marc1/pymp",
"score": 3
} |
#### File: 1marc1/pymp/pymp.py
```python
import os
import tempfile
import logging
import logging.handlers
import multiprocessing
import pymp_global as gv
import pymp_common as dc
def init_log():
_logfile = os.path.join(tempfile.gettempdir(),gv.GeneralLogFileName)
gv.logger = logging.getLogger('pymp')
gv.logger.setLevel(logging.DEBUG)
file_handler = logging.handlers.RotatingFileHandler(_logfile, maxBytes=gv.GeneralLogSize, backupCount=gv.GeneralLogCount)
file_handler.setFormatter(logging.Formatter(gv.GeneralLogFormat))
gv.logger.addHandler(file_handler)
def MyFunction():
gv.logger.info('Starting MyFunction')
dc.MPFunction()
if __name__ == "__main__":
multiprocessing.freeze_support()
gv.GeneralLogFileName = 'pymp.log'
init_log()
gv.logger.info('Starting pymp')
MyFunction()
``` |
{
"source": "1marc1/reportlab",
"score": 3
} |
#### File: reportlab/lib/units.py
```python
__version__=''' $Id: units.py 3342 2008-12-12 15:55:34Z andy $ '''
__doc__='''Defines inch, cm, mm etc as multiples of a point
You can now in user-friendly units by doing::
from reportlab.lib.units import inch
r = Rect(0, 0, 3 * inch, 6 * inch)
'''
inch = 72.0
cm = inch / 2.54
mm = cm * 0.1
pica = 12.0
def toLength(s):
'''convert a string to a length'''
try:
if s[-2:]=='cm': return float(s[:-2])*cm
if s[-2:]=='in': return float(s[:-2])*inch
if s[-2:]=='pt': return float(s[:-2])
if s[-1:]=='i': return float(s[:-1])*inch
if s[-2:]=='mm': return float(s[:-2])*mm
if s[-4:]=='pica': return float(s[:-4])*pica
return float(s)
except:
raise ValueError, "Can't convert '%s' to length" % s
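# Illustrative conversions (added for clarity; not part of the original module):
#   toLength('2cm')   == 2 * cm
#   toLength('1.5in') == 1.5 * inch
#   toLength('12pt')  == 12.0
#   toLength('6pica') == 6 * pica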
```
#### File: reportlab/tests/test_lib_sequencer.py
```python
__version__='''$Id: test_lib_sequencer.py 3288 2008-09-15 11:03:17Z rgbecker $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, printLocation
setOutDir(__name__)
import sys, random
import unittest
from reportlab.lib.sequencer import Sequencer
class SequencerTestCase(unittest.TestCase):
"Test Sequencer usage."
def test0(self):
"Test sequencer default counter."
seq = Sequencer()
msg = 'Initial value is not zero!'
assert seq._this() == 0, msg
def test1(self):
"Test incrementing default counter."
seq = Sequencer()
for i in range(1, 101):
n = seq.next()
msg = 'Sequence value is not correct!'
assert seq._this() == n, msg
def test2(self):
"Test resetting default counter."
seq = Sequencer()
start = seq._this()
for i in range(1, 101):
n = seq.next()
seq.reset()
msg = 'Sequence value not correctly reset!'
assert seq._this() == start, msg
def test3(self):
"Test incrementing dedicated counter."
seq = Sequencer()
for i in range(1, 101):
n = seq.next('myCounter1')
msg = 'Sequence value is not correct!'
assert seq._this('myCounter1') == n, msg
def test4(self):
"Test resetting dedicated counter."
seq = Sequencer()
start = seq._this('myCounter1')
for i in range(1, 101):
n = seq.next('myCounter1')
seq.reset('myCounter1')
msg = 'Sequence value not correctly reset!'
assert seq._this('myCounter1') == start, msg
def test5(self):
"Test incrementing multiple dedicated counters."
seq = Sequencer()
startMyCounter0 = seq._this('myCounter0')
startMyCounter1 = seq._this('myCounter1')
for i in range(1, 101):
n = seq.next('myCounter0')
msg = 'Sequence value is not correct!'
assert seq._this('myCounter0') == n, msg
m = seq.next('myCounter1')
msg = 'Sequence value is not correct!'
assert seq._this('myCounter1') == m, msg
## def testRandom(self):
## "Test randomly manipulating multiple dedicated counters."
##
## seq = Sequencer()
## counterNames = ['c0', 'c1', 'c2', 'c3']
##
## # Init.
## for cn in counterNames:
## setattr(self, cn, seq._this(cn))
## msg = 'Counter start value is not correct!'
## assert seq._this(cn) == 0, msg
##
## # Increment/decrement.
## for i in range(1, 101):
## n = seq.next('myCounter0')
## msg = 'Sequence value is not correct!'
## assert seq._this('myCounter0') == n, msg
## m = seq.next('myCounter1')
## msg = 'Sequence value is not correct!'
## assert seq._this('myCounter1') == m, msg
def makeSuite():
return makeSuiteForClasses(SequencerTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
```
#### File: reportlab/tests/test_pdfgen_links.py
```python
__version__='''$Id: test_pdfgen_links.py 3288 2008-09-15 11:03:17Z rgbecker $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
#
# Fit tests
#
# Modification History
# ====================
#
# 11-Mar-2003 <NAME>
# * Initial version.
#
#
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
from reportlab.lib import colors
import unittest
def markPage(c,height=letter[1],width=letter[0]):
height = height / inch
width = width / inch
for y in range(int(height)):
for x in range(int(width)):
c.drawString(x*inch,y*inch,"x=%d y=%d" % (x,y) )
c.line(x*inch,0,x*inch,height*inch)
c.line(0,y*inch,width*inch,y*inch)
fn = outputfile("test_pdfgen_links.pdf")
class LinkTestCase(unittest.TestCase):
"Test classes."
def test1(self):
c = canvas.Canvas(fn,pagesize=letter)
#Page 1
c.setFont("Courier", 10)
markPage(c)
c.bookmarkPage("P1")
c.addOutlineEntry("Page 1","P1")
#Note : XYZ Left is ignored because at this zoom the whole page fits the screen
c.bookmarkPage("P1_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0.5)
c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=1)
c.bookmarkPage("P1_XYZ2",fit="XYZ",top=7*inch,left=3*inch,zoom=5)
c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=1)
c.bookmarkPage("P1_FIT",fit="Fit")
c.addOutlineEntry("Page 1 Fit","P1_FIT",level=1)
c.bookmarkPage("P1_FITH",fit="FitH",top=2*inch)
c.addOutlineEntry("Page 1 FitH (top = 2 inch)","P1_FITH",level=1)
c.bookmarkPage("P1_FITV",fit="FitV",left=3*inch)
c.addOutlineEntry("Page 1 FitV (left = 3 inch)","P1_FITV",level=1)
c.bookmarkPage("P1_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=1)
c.bookmarkPage("P1_FORWARD")
c.addOutlineEntry("Forward References","P1_FORWARD",level=2)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
#Create link to FitR on page 3
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.blue)
c.drawString(inch+20,inch+20,"Click to jump to the meaning of life")
c.linkAbsolute("","MOL",(inch+10,inch+10,6*inch,2*inch))
c.restoreState()
#Create linkAbsolute to page 2
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.green)
c.drawString(4*inch,4*inch,"Jump to 2.5 inch position on page 2")
c.linkAbsolute("","HYPER_1",(3.75*inch,3.75*inch,8.25*inch,4.25*inch))
c.restoreState()
c.showPage()
#Page 2
c.setFont("Helvetica", 10)
markPage(c)
c.bookmarkPage("P2")
c.addOutlineEntry("Page 2","P2")
#Note : This time left will be at 3*inch because the zoom makes the page to big to fit
c.bookmarkPage("P2_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=2)
c.addOutlineEntry("Page 2 XYZ (top=7,left=3,zoom=2.0)","P2_XYZ",level=1)
c.bookmarkPage("P2_FIT",fit="Fit")
c.addOutlineEntry("Page 2 Fit","P2_FIT",level=1)
c.bookmarkPage("P2_FITH",fit="FitH",top=2*inch)
c.addOutlineEntry("Page 2 FitH (top = 2 inch)","P2_FITH",level=1)
c.bookmarkPage("P2_FITV",fit="FitV",left=10*inch)
c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=1)
c.bookmarkPage("P2_FITR",fit="FitR",left=1*inch,bottom=2*inch,right=5*inch,top=6*inch)
c.addOutlineEntry("Page 2 FitR (left=1,bottom=2,right=5,top=6)","P2_FITR",level=1)
c.bookmarkPage("P2_FORWARD")
c.addOutlineEntry("Forward References","P2_FORWARD",level=2)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=3)
c.bookmarkPage("P2_BACKWARD")
c.addOutlineEntry("Backward References","P2_BACKWARD",level=2)
c.addOutlineEntry("Page 1 Fit","P1_FIT",level=3)
c.addOutlineEntry("Page 1 FitR (left=1,bottom=2,right=5,top=6)","P1_FITR",level=3)
#Horizontal absolute test from page 1. Note that because of the page size used on page 3 all this will do
#is put the view centered on the bookmark. If you want to see it "up close and personal" change page3 to be
#the same page size as the other pages.
c.saveState()
c.setFont("Courier", 14)
c.setFillColor(colors.green)
c.drawString(2.5*inch,2.5*inch,"This line is hyperlinked from page 1")
# c.bookmarkHorizontalAbsolute("HYPER_1",3*inch) #slightly higher than the text otherwise text is of screen above.
c.bookmarkPage("HYPER_1",fit="XYZ",top=2.5*inch,bottom=2*inch)
c.restoreState()
#
c.showPage()
#Page 3
c.setFont("Times-Roman", 10)
        #Turn the page on its side and make it 2* the normal "width" in order to have something to test FitV against.
c.setPageSize((2*letter[1],letter[0]))
markPage(c,height=letter[0],width=2*letter[1])
c.bookmarkPage("P3")
c.addOutlineEntry("Page 3 (Double-wide landscape page)","P3")
#Note : XYZ with no zoom (set it to something first
c.bookmarkPage("P3_XYZ",fit="XYZ",top=7*inch,left=3*inch,zoom=0)
c.addOutlineEntry("Page 3 XYZ (top=7,left=3,zoom=0)","P3_XYZ",level=1)
#FitV works here because the page is so wide it can"t all fit on the page
c.bookmarkPage("P3_FITV",fit="FitV",left=10*inch)
c.addOutlineEntry("Page 3 FitV (left = 10 inch)","P3_FITV",level=1)
c.bookmarkPage("P3_BACKWARD")
c.addOutlineEntry("Backward References","P3_BACKWARD",level=2)
c.addOutlineEntry("Page 1 XYZ #1 (top=7,left=3,zoom=0.5)","P1_XYZ",level=3)
c.addOutlineEntry("Page 1 XYZ #2 (top=7,left=3,zoom=5)","P1_XYZ2",level=3)
c.addOutlineEntry("Page 2 FitV (left = 10 inch)","P2_FITV",level=3)
#Add link from page 1
c.saveState()
c.setFont("Courier", 40)
c.setFillColor(colors.green)
c.drawString(5*inch,6*inch,"42")
c.bookmarkPage("MOL",fit="FitR",left=4*inch,top=7*inch,bottom=4*inch,right=6*inch)
c.showOutline()
c.save()
def makeSuite():
return makeSuiteForClasses(LinkTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
print "wrote", fn
printLocation()
```
#### File: reportlab/tests/test_platypus_toc.py
```python
__version__='''$Id: test_platypus_toc.py 3708 2010-05-12 14:49:43Z andy $'''
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import sys, os
from os.path import join, basename, splitext
from math import sqrt
import random
import unittest
from reportlab.lib.units import inch, cm
from reportlab.lib.pagesizes import A4
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.xpreformatted import XPreformatted
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate \
import PageTemplate, BaseDocTemplate
from reportlab.platypus import tableofcontents, PageBreak
from reportlab.lib import randomtext
def myMainPageFrame(canvas, doc):
"The page frame used for all PDF documents."
canvas.saveState()
canvas.rect(2.5*cm, 2.5*cm, 15*cm, 25*cm)
canvas.setFont('Times-Roman', 12)
pageNumber = canvas.getPageNumber()
canvas.drawString(10*cm, cm, str(pageNumber))
canvas.restoreState()
class MyDocTemplate(BaseDocTemplate):
"The document template used for all PDF documents."
_invalidInitArgs = ('pageTemplates',)
def __init__(self, filename, **kw):
frame1 = Frame(2.5*cm, 2.5*cm, 15*cm, 25*cm, id='F1')
self.allowSplitting = 0
BaseDocTemplate.__init__(self, filename, **kw)
template = PageTemplate('normal', [frame1], myMainPageFrame)
self.addPageTemplates(template)
def afterFlowable(self, flowable):
"Registers TOC entries."
if flowable.__class__.__name__ == 'Paragraph':
styleName = flowable.style.name
if styleName[:7] == 'Heading':
key = str(hash(flowable))
self.canv.bookmarkPage(key)
# Register TOC entries.
level = int(styleName[7:])
text = flowable.getPlainText()
pageNum = self.page
# Try calling this with and without a key to test both
# Entries of every second level will have links, others won't
if level % 2 == 1:
self.notify('TOCEntry', (level, text, pageNum, key))
else:
self.notify('TOCEntry', (level, text, pageNum))
def makeHeaderStyle(level, fontName='Times-Roman'):
"Make a header style for different levels."
assert level >= 0, "Level must be >= 0."
PS = ParagraphStyle
size = 24.0 / sqrt(1+level)
style = PS(name = 'Heading' + str(level),
fontName = fontName,
fontSize = size,
leading = size*1.2,
spaceBefore = size/4.0,
spaceAfter = size/8.0)
return style
def makeBodyStyle():
"Body text style - the default will do"
return ParagraphStyle('body')
def makeTocHeaderStyle(level, delta, epsilon, fontName='Times-Roman'):
"Make a header style for different levels."
assert level >= 0, "Level must be >= 0."
PS = ParagraphStyle
size = 12
style = PS(name = 'Heading' + str(level),
fontName = fontName,
fontSize = size,
leading = size*1.2,
spaceBefore = size/4.0,
spaceAfter = size/8.0,
firstLineIndent = -epsilon,
leftIndent = level*delta + epsilon)
return style
class TocTestCase(unittest.TestCase):
"Test TableOfContents class (eyeball-test)."
def test0(self):
"""Test story with TOC and a cascaded header hierarchy.
The story should contain exactly one table of contents that is
immediately followed by a list of cascaded levels of header
lines, each nested one level deeper than the previous one.
Features to be visually confirmed by a human being are:
1. TOC lines are indented in multiples of 1 cm.
2. Wrapped TOC lines continue with additional 0.5 cm indentation.
3. Only entries of every second level have links
...
"""
maxLevels = 12
# Create styles to be used for document headers
# on different levels.
headerLevelStyles = []
for i in range(maxLevels):
headerLevelStyles.append(makeHeaderStyle(i))
# Create styles to be used for TOC entry lines
# for headers on different levels.
tocLevelStyles = []
d, e = tableofcontents.delta, tableofcontents.epsilon
for i in range(maxLevels):
tocLevelStyles.append(makeTocHeaderStyle(i, d, e))
# Build story.
story = []
styleSheet = getSampleStyleSheet()
bt = styleSheet['BodyText']
description = '<font color=red>%s</font>' % self.test0.__doc__
story.append(XPreformatted(description, bt))
toc = tableofcontents.TableOfContents()
toc.levelStyles = tocLevelStyles
story.append(toc)
for i in range(maxLevels):
story.append(Paragraph('HEADER, LEVEL %d' % i,
headerLevelStyles[i]))
#now put some body stuff in.
txt = randomtext.randomText(randomtext.PYTHON, 5)
para = Paragraph(txt, makeBodyStyle())
story.append(para)
path = outputfile('test_platypus_toc.pdf')
doc = MyDocTemplate(path)
doc.multiBuild(story)
def test1(self):
"""This shows a table which would take more than one page,
and need multiple passes to render. But we preload it
with the right headings to make it go faster. We used
a simple 100-chapter document with one level.
"""
chapters = 30 #goes over one page
headerStyle = makeHeaderStyle(0)
d, e = tableofcontents.delta, tableofcontents.epsilon
tocLevelStyle = makeTocHeaderStyle(0, d, e)
# Build most of the story; we'll re-use it to
# make documents with different numbers of passes.
story = []
styleSheet = getSampleStyleSheet()
bt = styleSheet['BodyText']
description = '<font color=red>%s</font>' % self.test1.__doc__
story.append(XPreformatted(description, bt))
for i in range(chapters):
story.append(PageBreak())
story.append(Paragraph('This is chapter %d' % (i+1),
headerStyle))
#now put some lengthy body stuff in.
for paras in range(random.randint(1,3)):
txt = randomtext.randomText(randomtext.PYTHON, 5)
para = Paragraph(txt, makeBodyStyle())
story.append(para)
#try 1: empty TOC, 3 passes
toc = tableofcontents.TableOfContents()
toc.levelStyles = [tocLevelStyle] #only need one
story1 = [toc] + story
path = outputfile('test_platypus_toc_preload.pdf')
doc = MyDocTemplate(path)
passes = doc.multiBuild(story1)
self.assertEquals(passes, 3)
#try 2: now preload the TOC with the entries
toc = tableofcontents.TableOfContents()
toc.levelStyles = [tocLevelStyle] #only need one
tocEntries = []
for i in range(chapters):
#add tuple of (level, text, pageNum, key)
#with an initial guess of pageNum=0
tocEntries.append((0, 'This is chapter %d' % (i+1), 0, None))
toc.addEntries(tocEntries)
story2 = [toc] + story
path = outputfile('test_platypus_toc_preload.pdf')
doc = MyDocTemplate(path)
passes = doc.multiBuild(story2)
self.assertEquals(passes, 2)
#try 3: preload again but try to be really smart and work out
#in advance what page everything starts on. We cannot
#use a random story for this.
toc3 = tableofcontents.TableOfContents()
toc3.levelStyles = [tocLevelStyle] #only need one
tocEntries = []
for i in range(chapters):
#add tuple of (level, text, pageNum, key)
#with an initial guess of pageNum= 3
tocEntries.append((0, 'This is chapter %d' % i, 2+i, None))
toc3.addEntries(tocEntries)
story3 = [toc3]
for i in range(chapters):
story3.append(PageBreak())
story3.append(Paragraph('This is chapter %d' % (i+1),
headerStyle))
txt = """
The paragraphs in this are not at all random, because
we need to be absolutely, totally certain they will fit
on one page. Each chapter will be one page long.
"""
para = Paragraph(txt, makeBodyStyle())
story3.append(para)
path = outputfile('test_platypus_toc_preload.pdf')
doc = MyDocTemplate(path)
passes = doc.multiBuild(story3)
# I can't get one pass yet
#self.assertEquals(passes, 1)
def makeSuite():
return makeSuiteForClasses(TocTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
```
#### File: reportlab/tests/test_utils.py
```python
__version__='''$Id: test_utils.py 3288 2008-09-15 11:03:17Z rgbecker $'''
__doc__="""Test reportlab.lib.util module"""
from reportlab.lib.testutils import setOutDir,makeSuiteForClasses, outputfile, printLocation
setOutDir(__name__)
import unittest
class FmtTestCase(unittest.TestCase):
def testFmt(self):
from reportlab.lib.utils import FmtSelfDict
class MixedIn(FmtSelfDict):
def __init__(self):
self.a = 'AA'
self._b = '_BB'
self.d = '(overridden)'
obj = MixedIn()
self.assertEqual('blah', obj._fmt('blah'))
self.assertEqual('blah %', obj._fmt('blah %%'))
self.assertRaises(ValueError, obj._fmt, 'blah %')
self.assertEqual(
'moon AA june_BB spoon %(a)sCC ni',
obj._fmt('moon %(a)s june%(_b)s spoon %%(a)s%(c)s %(d)s', c='CC', C='boon', d='ni'))
self.assertRaises(AttributeError, obj._fmt, '%(c)s') # XXX bit weird, can this be changed?
def makeSuite():
return makeSuiteForClasses(FmtTestCase)
#noruntests
if __name__ == "__main__":
unittest.TextTestRunner().run(makeSuite())
printLocation()
```
#### File: tools/docco/t_parse.py
```python
import re, string
from types import StringType
from string import find
#
# template parsing
#
# EG: T = Template("(NNN)NNN-NNNN X X", "X", "N")
# ([area, exch, ext, fn, ln], index) = T.PARSE("(908)949-2726 <NAME>")
#
class Template:
def __init__(self,
template,
wild_card_marker=None,
single_char_marker=None,
**marker_to_regex_dict):
self.template = template
self.wild_card = wild_card_marker
self.char = single_char_marker
# determine the set of markers for this template
markers = marker_to_regex_dict.keys()
if wild_card_marker:
markers.append(wild_card_marker)
if single_char_marker:
for ch in single_char_marker: # allow multiple scm's
markers.append(ch)
self.char = single_char_primary = single_char_marker[0]
self.markers = markers
for mark in markers:
if len(mark)>1:
raise ValueError, "Marks must be single characters: "+repr(mark)
# compile the regular expressions if needed
self.marker_dict = marker_dict = {}
for (mark, rgex) in marker_to_regex_dict.items():
if type(rgex) == StringType:
rgex = re.compile(rgex)
marker_dict[mark] = rgex
# determine the parse sequence
parse_seq = []
# dummy last char
lastchar = None
index = 0
last = len(template)
# count the number of directives encountered
ndirectives = 0
while index<last:
start = index
thischar = template[index]
# is it a wildcard?
if thischar == wild_card_marker:
if lastchar == wild_card_marker:
raise ValueError, "two wild cards in sequence is not allowed"
parse_seq.append( (wild_card_marker, None) )
index = index+1
ndirectives = ndirectives+1
# is it a sequence of single character markers?
elif single_char_marker and thischar in single_char_marker:
if lastchar == wild_card_marker:
raise ValueError, "wild card cannot precede single char marker"
while index<last and template[index] == thischar:
index = index+1
parse_seq.append( (single_char_primary, index-start) )
ndirectives = ndirectives+1
# is it a literal sequence?
elif not thischar in markers:
while index<last and not template[index] in markers:
index = index+1
parse_seq.append( (None, template[start:index]) )
# otherwise it must be a re marker
else:
rgex = marker_dict[thischar]
parse_seq.append( (thischar, rgex) )
ndirectives = ndirectives+1
index = index+1
lastchar = template[index-1]
self.parse_seq = parse_seq
self.ndirectives = ndirectives
def PARSE(self, str, start=0):
ndirectives = self.ndirectives
wild_card = self.wild_card
single_char = self.char
parse_seq = self.parse_seq
lparse_seq = len(parse_seq) - 1
# make a list long enough for substitutions for directives
result = [None] * ndirectives
current_directive_index = 0
currentindex = start
# scan through the parse sequence, recognizing
for parse_index in xrange(lparse_seq + 1):
(indicator, data) = parse_seq[parse_index]
# is it a literal indicator?
if indicator is None:
if find(str, data, currentindex) != currentindex:
raise ValueError, "literal not found at "+repr((currentindex,data))
currentindex = currentindex + len(data)
else:
# anything else is a directive
# is it a wildcard?
if indicator == wild_card:
# if it is the last directive then it matches the rest of the string
if parse_index == lparse_seq:
last = len(str)
# otherwise must look at next directive to find end of wildcard
else:
# next directive must be re or literal
(nextindicator, nextdata) = parse_seq[parse_index+1]
if nextindicator is None:
# search for literal
last = find(str, nextdata, currentindex)
if last<currentindex:
raise ValueError, \
"couldn't terminate wild with lit "+repr(currentindex)
else:
# data is a re, search for it
last = nextdata.search(str, currentindex)
if last<currentindex:
raise ValueError, \
"couldn't terminate wild with re "+repr(currentindex)
elif indicator == single_char:
# data is length to eat
last = currentindex + data
else:
# other directives are always regular expressions
last = data.match(str, currentindex) + currentindex
if last<currentindex:
raise ValueError, "couldn't match re at "+repr(currentindex)
#print "accepting", str[currentindex:last]
result[current_directive_index] = str[currentindex:last]
current_directive_index = current_directive_index+1
currentindex = last
# sanity check
if current_directive_index != ndirectives:
raise SystemError, "not enough directives found?"
return (result, currentindex)
# some useful regular expressions
USERNAMEREGEX = \
"["+string.letters+"]["+string.letters+string.digits+"_]*"
STRINGLITREGEX = "'[^\n']*'"
SIMPLEINTREGEX = "["+string.digits+"]+"
id = re.compile(USERNAMEREGEX)
str = re.compile(STRINGLITREGEX)
int = re.compile(SIMPLEINTREGEX)
def test():
global T, T1, T2, T3
T = Template("(NNN)NNN-NNNN X X", "X", "N")
print T.PARSE("(908)949-2726 <NAME>")
T1 = Template("s --> s blah", s=str)
s = "' <-- a string --> ' --> 'blah blah another string blah' blah"
print T1.PARSE(s)
T2 = Template("s --> NNNiX", "X", "N", s=str, i=int)
print T2.PARSE("'A STRING' --> 15964653alpha beta gamma")
T3 = Template("XsXi", "X", "N", s=str, i=int)
print T3.PARSE("prefix'string'interior1234junk not parsed")
T4 = Template("MMDDYYX", "X", "MDY")
print T4.PARSE("122961 Somebody's birthday!")
if __name__=="__main__": test()
``` |
{
"source": "1Mathias/PublicNLPA",
"score": 3
} |
#### File: 1Mathias/PublicNLPA/trigram_generator.py
```python
import nltk
def trigramm(s):
sentence = s.split()
#bigramed = list(nltk.bigrams(sentence))
trigrammed =list(nltk.trigrams(sentence))
trigrlist = [('የህዝብ', 'ተወካዮች','ቢሮ'), ('የሚንስትሮች','ምክር', 'ቤት')]
isec = set(trigrammed) & set(trigrlist)
newdata = []
i = []
c = []
for f in trigrammed:
if f in isec:
newdata.append(str(f).replace("'", "").replace(", ", "_"))
c.append(str(f).replace("'", "").replace(", ", "_"))
i.append(int(1))
else:
newdata.append(str(f).replace("'", "").replace(", ", " "))
i.append(int(0))
c.append(tuple(f))
w = []
k = []
s = []
l = []
b = 0
f = 0
for x in range(len(i)):
if (x == 0):
b = 0
else:
b = x - 1
if (x < len(i)):
f = x + 1
else:
f = x
# if(i[b]!=1):
# l.append(i[x])
# s.append(c[x])
b2 = 0
b = 0
y = 0
t=[]
for x in range(len(i)):
# Two forwards
if (x == 0 or x == 1):
b2 = x
else:
b2 = x - 2
y = 0 # forward loop
if (x < len(i) - 1):
y = x + 1
else:
y = x
b = 0 # backward loop
if (x == 0):
b = x
else:
b = x - 1
y2=0 # two forwards
if(x<len(i)-2):
y2=x+2
else:
y2=x
if(i[y]==1 and i[x]==0):
continue
elif(i[b]==1 and i[x]==0):
continue
elif(i[y]==1):
t.append(c[x])
elif(i[y2]==1):
t.append((c[x][0],c[x][1]))
elif(i[b2]==1):
t.append((c[x][1], c[x][2]))
elif(i[x]==1):
t.append(c[x])
elif(i[x]==0 and i[y]==0 and i[y2]==0):
t.append(c[x][0])
if (i[x] == 0 and i[y] == 1 and i[b] == 0 and i[b2] == 0):
# print(c[x][0])
k.append(c[x][0])
elif (i[x] == 0 and i[y] == 1 and i[b] == 0 and i[b2] == 1):
continue
elif (i[x] == 1 and i[y] == 0):
# print(c[x])
k.append(c[x])
elif (i[x] == 0 and i[y] == 0 and i[b] == 0 and i[b2] == 0):
k.append(c[x])
elif (i[x] == 0 and i[y] == 0 and i[b] == 0 and i[b2] == 1):
k.append(c[x][1])
elif (i[x] == 0 and i[b] == 0):
k.append(c[x])
elif (i[x] == 0 and i[b] == 1):
k.append(c[x][1])
# print(c[x])
print(t)
return k
# s = "ዛሬ ፍርድ ቤት እና ትምህርት ቤት ነበርኩ"
# sentence = ['ፍርድ','ቤት', 'beginning','ትምህርት','ቤት', 'God', 'created', 'the', 'heaven','and', 'the', 'earth', '.']
# sentence = s.split()
# print(texts)
# print(*map(' '.join, isec), sep=', ')
``` |
{
"source": "1matthewli/CornerNet",
"score": 3
} |
#### File: models/py_utils/tree_utils.py
```python
from anytree import Node, RenderTree
def read_tree():
tree_file = '../../metadata/9k.tree'
f = open(tree_file, 'r')
node_indices = {}
node_dict = {}
root = Node("root")
node_indices[-1] = root
lines = f.readlines()
for i in range(len(lines)):
line = lines[i]
tokens = line.split()
new_node = Node(tokens[0], parent=node_indices[int(tokens[1])])
node_indices[i] = new_node
node_dict[tokens[0]] = new_node
groups = []
print(groups)
add_group([root], groups)
print(groups[1])
return node_dict
def add_group(level, groups):
for node in level:
if not node.is_leaf:
children = node.children
curr_group = [n.name for n in children]
groups.append(curr_group)
add_group(children, groups)
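# Illustrative note (an assumption, not taken from this repo): each line of the
# 9k.tree file is expected to hold "<label> <parent_index>", where the index refers
# to an earlier line and -1 denotes the synthetic root, e.g.
#
#   n00001740 -1
#   n00002137 0
#   n00002452 0
#
# With that input, read_tree() would hang both later labels off the first one, and
# add_group() would collect them into a single sibling group ['n00002137', 'n00002452'].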
tree_dict = read_tree()
``` |
{
"source": "1Maxnet1/opbasm",
"score": 3
} |
#### File: doc/util/extract_lib_docs.py
```python
from sphinxcontrib.napoleon import *
import re
import sys
class PicoBlazeGoogleDocstring(GoogleDocstring):
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
super(PicoBlazeGoogleDocstring, self).__init__(docstring, config, app, what,
name, obj, options)
def _parse_examples_section(self, section):
'''Create an examples section with a PicoBlaze code block'''
header = ['', '.. rubric:: Example:', '']
block = ['.. code-block:: picoblaze', '']
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_parameters_section(self, section):
'''Need to add blank line before parameter list'''
fields = self._consume_fields()
if self._config.napoleon_use_param:
lines = []
for _name, _type, _desc in fields:
field = ':param %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':type %s: %s' % (_name, _type))
return [''] + lines + ['']
else:
return [''] + self._format_fields('Parameters', fields)
def extract_docstrings(fname):
'''Pull out docstrings from picoblaze.m4'''
funcs = {}
state = None
ds = []
with open(fname) as fh:
for l in fh:
if l.startswith(';--------'):
state = 'docstring'
ds = []
continue
if state == 'docstring':
if l.startswith(';'):
ds.append(l[2:].rstrip())
elif l.startswith('define('):
# Get macro name
m = re.match(r"^\s*define\(\s*`([^_][\w]+)'", l)
if not m:
m = re.match(r"^\s*define\(\s*<!([^_][\w]+)!>", l)
if m:
funcs[m.group(1)] = ds
state = None
return funcs
def indent(text, spaces=2):
prefix = ' '*spaces
return prefix + ('\n'+prefix).join(text.split('\n'))
def build_signature(macro, body):
'''Generate a macro signature from the parameter list'''
params = []
for l in body.split('\n'):
m = re.search(r':param\s+([^:]*)\s*:', l)
if m:
p = m.group(1)
if p not in params:
if 'optional' in l.lower():
p = '[{}]'.format(p)
params.append(p)
sig = '{}({})'.format(macro, ', '.join(params))
sig = sig.replace('], [', ', ')
return sig
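# Illustrative sketch (hypothetical macro, not taken from picoblaze.m4):
# extract_docstrings() expects each documented macro to be introduced by a ';-----'
# rule, followed by ';' comment lines in Google docstring style and a define(), e.g.
#
#   ;---------------------------------------------------------------------
#   ; Add two 16-bit values.
#   ;
#   ; Args:
#   ;   r1: First operand register pair
#   ;   r2: Second operand register pair, optional
#   define(`add16', `...')
#
# build_signature() then scans the rendered ':param' lines, so the macro above
# would be listed as "add16(r1, [r2])".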
funcs = extract_docstrings(sys.argv[1])
print '''
Opbasm PicoBlaze macro library reference
========================================
'''
for k in sorted(funcs.iterkeys()):
body = indent(str(PicoBlazeGoogleDocstring(funcs[k])))
signature = build_signature(k, body)
print '.. pb:macro:: {}\n\n{}\n'.format(signature, body)
```
#### File: opbasm/opbasm/color.py
```python
from __future__ import print_function, division
try:
import colorama
colorama.init()
from colorama import Fore, Back, Style
except ImportError:
def note(t): return t
def success(t): return t
def warn(t): return t
def error(t): return t
else:
import os
_no_color = os.getenv('NO_COLOR', 'false')
_no_color = True if _no_color.lower() in ['1', 'true', 't', 'y', 'yes'] else False
def stdout_redirected():
return os.fstat(0) != os.fstat(1)
_redir_stdout = stdout_redirected()
def colorize(t, code):
if _no_color or _redir_stdout:
return t
return ''.join([code, t, Style.RESET_ALL])
def note(t):
return colorize(t, Fore.MAGENTA)
def success(t):
return colorize(t, Fore.GREEN)
def warn(t):
return colorize(t, Fore.YELLOW + Style.BRIGHT)
def error(t):
return colorize(t, Fore.RED + Style.BRIGHT)
if __name__ == '__main__':
print('Colorized text:\n')
print('note("foobar") : ' + note('foobar'))
print('success("foobar") : ' + success('foobar'))
print('warn("foobar") : ' + warn('foobar'))
print('error("foobar") : ' + error('foobar'))
```
#### File: opbasm/opbasm/devices.py
```python
from __future__ import print_function, division, unicode_literals, absolute_import
from opbasm.common import *
class DeviceArch(object):
def __init__(self):
self.name = ''
self.short_name = ''
def instruction_words(self, asm, stmt):
'''Determine the number of words generated for each instruction
Normally this is 1 but the OUTPUTK and LOAD&RETURN instructions are
replicated if a string or table is passed as an operand.
stmt : Statement object
'''
return 1 if stmt.is_instruction() else 0
class DevicePb3(DeviceArch):
def __init__(self):
self.name = 'PicoBlaze-3'
self.short_name = 'pb3'
self.has_string_table_support = False
self.zero_instr = 'LOAD s0, 00'
self.opcodes = { \
'add': 0x18000, 'addcy': 0x1a000, 'and': 0x0a000, 'call': 0x30000, \
'compare': 0x14000, 'disable': 0x3c000, 'enable': 0x3c001,
'fetch': 0x06000, 'input': 0x04000, 'jump': 0x34000, \
'load': 0x00000, 'or': 0x0c000, 'output': 0x2c000, 'return': 0x2a000, \
'returni': 0x38000, \
'rl': 0x20002, 'rr': 0x2000c, 'sl0': 0x20006, 'sl1': 0x20007, \
'slx': 0x20004, 'sla': 0x20000, 'sr0': 0x2000e, 'sr1': 0x2000f, \
'sra': 0x20008, 'srx': 0x2000a, 'store': 0x2e000, 'sub': 0x1c000, \
'subcy': 0x1e000, 'test': 0x12000, 'xor': 0x0e000, \
'inst': 0x00000 \
}
self.flag_opcodes = set(('call', 'jump', 'return'))
self.flag_codes = {
'c' : 0x1800,
'nc': 0x1c00,
'z' : 0x1000,
'nz': 0x1400
}
self.return_flag_codes = self.flag_codes
self.addr_opcodes = set(('call', 'jump'))
self.one_reg_opcodes = set(('rl', 'rr', 'sl0', 'sl1', 'sla', 'slx', 'sr0', 'sr1', 'sra', 'srx'))
self.two_reg_opcodes = set(('add', 'addcy', 'and', 'compare', 'fetch', 'input', \
'load', 'or', 'output', 'store', 'sub', 'subcy', 'test', 'xor'))
self.two_reg_op_offset = 0x1000
self.directives = set(('address', 'constant', 'namereg', \
'include', 'default_jump'))
class DevicePb6(DeviceArch):
def __init__(self):
self.name = 'PicoBlaze-6'
self.short_name = 'pb6'
self.has_string_table_support = True
self.zero_instr = 'LOAD s0, s0'
self.opcodes = { \
'add': 0x11000, 'addcy': 0x13000, 'and': 0x03000, 'call': 0x20000, \
'compare': 0x1d000, 'disable': 0x28000, 'enable': 0x28001,
'fetch': 0x0b000, 'input': 0x09000, 'jump': 0x22000, \
'load': 0x01000, 'or': 0x05000, 'output': 0x2d000, 'return': 0x25000, \
'returni': 0x29000, \
'rl': 0x14002, 'rr': 0x1400c, 'sl0': 0x14006, 'sl1': 0x14007, \
'slx': 0x14004, 'sla': 0x14000, 'sr0': 0x1400e, 'sr1': 0x1400f, \
'sra': 0x14008, 'srx': 0x1400a, 'store': 0x2f000, 'sub': 0x19000, \
'subcy': 0x1b000, 'test': 0x0d000, 'xor': 0x07000, \
# New PicoBlaze-6 instructions:
'call@': 0x24000, 'comparecy': 0x1f000, 'hwbuild': 0x14080, 'jump@': 0x26000, \
'load&return': 0x21000, 'outputk': 0x2b000, 'regbank': 0x37000, 'star': 0x17000, \
'testcy': 0x0f000, 'inst': 0x00000 \
}
self.flag_opcodes = set(('call', 'jump', 'return'))
self.flag_codes = {
'c' : 0x18000,
'nc': 0x1c000,
'z' : 0x10000,
'nz': 0x14000
}
# PicoBlaze-6 uses inconsistent offsets for the conditional return instructions
self.return_flag_codes = {
'c' : 0x14000,
'nc': 0x18000,
'z' : 0x0c000,
'nz': 0x10000
}
self.addr_opcodes = set(('call', 'jump'))
self.one_reg_opcodes = set(('rl', 'rr', 'sl0', 'sl1', 'sla', 'slx', 'sr0', 'sr1', 'sra', 'srx', \
'hwbuild'))
self.two_reg_opcodes = set(('add', 'addcy', 'and', 'compare', 'fetch', 'input', \
'load', 'or', 'output', 'store', 'sub', 'subcy', 'test', 'xor', \
'comparecy', 'testcy'))
self.two_reg_op_offset = -0x1000
self.directives = set(('address', 'constant', 'namereg', \
'include', 'default_jump', 'string', 'table'))
def instruction_words(self, asm, stmt):
'''Determine the number of words generated for each instruction
Normally this is 1 but the OUTPUTK and LOAD&RETURN instructions are
replicated if a string or table is passed as an operand.
stmt : Statement object
'''
if stmt.is_instruction():
num_words = 1
array_name = None
if stmt.command == 'outputk':
if stmt.arg1 is not None and stmt.arg1[-1] in ('$', '#'):
array_name = stmt.arg1
elif stmt.command == 'load&return':
if stmt.arg2 is not None and stmt.arg2[-1] in ('$', '#'):
array_name = stmt.arg2
if array_name is not None:
if array_name[-1] == '$':
if array_name not in asm.strings:
raise StatementError(stmt, _('Unknown string:'), array_name)
num_words = len(asm.strings[array_name].value)
else: # Table
if array_name not in asm.tables:
raise StatementError(stmt, _('Unknown table:'), array_name)
num_words = len(asm.tables[array_name].value)
return num_words
else:
return 0
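# Illustrative note (hypothetical names, not from the original source): for a
# PicoBlaze-6 program that defines a 5-character string bound to mystr$, a statement
# such as
#
#   OUTPUTK mystr$, 0
#
# is expanded once per character, so instruction_words() reports 5 words for it,
# while ordinary instructions (and OUTPUTK with a plain constant) count as 1.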
```
#### File: opbasm/opbasm/hamming.py
```python
from __future__ import print_function, division
import math
def split_bits(n, num_bits):
'''Convert integer to a list of bits (MSB-first)
n (int)
The number to convert to bits.
num_bits (int)
The number of bits in the result.
Returns a list of ints representing each bit in n.
'''
bits = [0] * num_bits
for i in xrange(num_bits-1, -1, -1):
bits[i] = n & 0x01
n >>= 1
return bits
def join_bits(bits):
'''Convert an array of bits (MSB first) to an integer word
bits (sequence of ints)
The bits to be merged into an integer
Returns an int representing the bits contained in the bits parameter.
'''
word = 0
for b in bits:
word = (word << 1) | b
return word
def hamming_message_size( data_size ):
'''Calculate total message size from the data size'''
psize = int(math.log(data_size, 2)) + 1
if (2**psize - 1) - psize < data_size:
psize += 1
return data_size + psize
def hamming_parity_size( message_size ):
'''Calculate parity size from the total message size'''
return int(math.ceil(math.log(message_size + 1, 2)))
def hamming_data_size( message_size ):
'''Calculate data size from the total message size'''
return message_size - hamming_parity_size(message_size)
def secded_message_size( data_size ):
'''Calculate total SECDED message size from the data size'''
return hamming_message_size(data_size) + 1
def secded_parity_size( message_size ):
'''Calculate SECDED parity size from the total message size'''
return hamming_parity_size(message_size) + 1
def secded_data_size( message_size ):
'''Calculate SECDED data size from the total message size'''
return message_size - secded_parity_size(message_size)
def hamming_interleave( data, parity_bits ):
'''Combine separate data and parity bits into a message with interleaved parity
data : array of data bits
parity_bits : array of parity_bits. Use all 0's when encoding
Returns full message with interleaved bits
'''
message_size = len(data) + len(parity_bits)
assert len(data) == hamming_data_size(message_size), 'Data and parity size mismatch'
msg = [0 for _ in xrange(message_size)]
parity_ix = 0
data_ix = 0
for i in xrange(1,len(msg)+1):
if 2**int(math.ceil(math.log(i,2))) == i: # This is a power of 2 and will be a parity bit
msg[i-1] = parity_bits[parity_ix]
parity_ix += 1
else:
msg[i-1] = data[data_ix]
data_ix += 1
return msg
def hamming_parity( message ):
'''Generate Hamming parity bits from an interleaved message
This is the core routine of the package that determines which bits of a
message to XOR together. It is employed for both encoding and decoding
When encoding, the message should have all zeroes interleaved for the
parity bits. The result is the parity to be used by a decoder.
When decoding, the previously generated parity bits are interleaved and
the result is a syndrome that can be used for error detection and
correction.
Returns the computed parity bits
'''
result = [0 for _ in xrange(hamming_parity_size(len(message)))]
result_ix = 0
for i in xrange(1, len(message)+1):
if 2**int(math.ceil(math.log(i,2))) == i: # This is a power of 2
count = i
parity_bit = 0
for d in xrange(i, len(message)+1):
if count > 0:
parity_bit ^= message[d-1]
elif count == 1 - i:
count = i + 1
count -= 1
result[result_ix] = parity_bit
result_ix += 1
return result
def hamming_encode( data ):
'''Generate parity for the data'''
parity_bits = [0 for _ in xrange(hamming_parity_size(hamming_message_size(len(data))))]
parity_bits = hamming_parity(hamming_interleave(data, parity_bits))
return parity_bits
def secded_encode( data ):
'''Generate SECDED parity for the data'''
parity_bits = hamming_encode(data)
overall = reduce(lambda a,b: a ^ b, data) ^ reduce(lambda a,b: a ^ b, parity_bits)
parity_bits.append(overall)
return parity_bits
def secded_encode_num( data, bits ):
'''Generate SECDED parity from an integer value
data : Integer data to compute SECDED parity on
bits : Number of bits in the data word
Returns an integer representing the SECDED parity'''
adata = [int(b) for b in reversed(split_bits(data, bits))]
sparity = secded_encode(adata)
return join_bits(reversed(sparity))
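if __name__ == '__main__':
    # Minimal usage sketch added for illustration; it is not part of the original
    # opbasm module. 0xA5 is just an arbitrary 8-bit example value.
    data_word = 0xA5
    parity = secded_encode_num(data_word, 8)
    msg_size = secded_message_size(8)
    print('data=0x%02X parity=0x%X (%d-bit message, %d parity bits)' %
          (data_word, parity, msg_size, secded_parity_size(msg_size)))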
``` |
{
"source": "1mbtxn/BitcoinUnlimited",
"score": 2
} |
#### File: test/functional/net.py
```python
import time
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_jsonrpc,
connect_nodes_bi,
p2p_port,
start_nodes,
)
class NetTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 2
def setup_network(self):
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
connect_nodes_bi(self.nodes, 0, 1)
self.is_network_split = False
self.sync_all()
def run_test(self):
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
self.nodes[0].setnetworkactive(False)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
timeout = 3
while self.nodes[0].getnetworkinfo()['connections'] != 0:
# Wait a bit for all sockets to close
assert timeout > 0, 'not all connections closed in time'
timeout -= 0.1
time.sleep(0.1)
self.nodes[0].setnetworkactive(True)
connect_nodes_bi(self.nodes, 0, 1)
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
# test getaddednodeinfo
assert_equal(self.nodes[0].getaddednodeinfo(), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(ip_port, 'add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existent node returns an error
assert_raises_jsonrpc(-24, "Node has not been added",
self.nodes[0].getaddednodeinfo, '1.1.1.1')
if __name__ == '__main__':
NetTest().main()
``` |
{
"source": "1mehal/imdb-graphql",
"score": 2
} |
#### File: python/imdb_graphql/schema.py
```python
import graphene
from graphene_sqlalchemy import SQLAlchemyObjectType
from sqlalchemy import func, desc
from .models import (
Title as TitleModel,
Movie as MovieModel,
Series as SeriesModel,
Episode as EpisodeModel,
EpisodeInfo as EpisodeInfoModel,
Rating as RatingModel,
Name as NameModel,
TitleType as TitleTypeEnum
)
TitleType = graphene.Enum.from_enum(TitleTypeEnum)
class Rating(SQLAlchemyObjectType):
class Meta:
model = RatingModel
interfaces = (graphene.relay.Node, )
imdbID = graphene.String()
averageRating = graphene.Float()
numVotes = graphene.Int()
class Title(graphene.Interface):
imdbID = graphene.String()
titleType = graphene.String()
primaryTitle = graphene.String()
originalTitle = graphene.String()
isAdult = graphene.Boolean()
startYear = graphene.Int()
endYear = graphene.Int()
runtime = graphene.Int()
genres = graphene.String()
averageRating = graphene.Float()
numVotes = graphene.Int()
exclude_fields = ('titleSearchCol', '_type', )
class Movie(SQLAlchemyObjectType):
class Meta:
model = MovieModel
interfaces = (Title, )
exclude_fields = exclude_fields
class Episode(SQLAlchemyObjectType):
class Meta:
model = EpisodeModel
interfaces = (Title, )
exclude_fields = exclude_fields
seasonNumber = graphene.Int()
episodeNumber = graphene.Int()
series = graphene.Field(lambda: Series)
class Series(SQLAlchemyObjectType):
class Meta:
model = SeriesModel
interfaces = (Title, )
exclude_fields = exclude_fields
totalSeasons = graphene.Int()
episodes = graphene.Field(
graphene.List(Episode),
season=graphene.List(graphene.Int)
)
def resolve_episodes(self, info, **args):
query = (
Episode
.get_query(info)
.join(EpisodeModel.info)
.filter_by(seriesID=self.imdbID)
)
query = (
query.filter(EpisodeInfoModel.seasonNumber.in_(args['season']))
if 'season' in args else query
)
query = (
query.order_by(
EpisodeInfoModel.seasonNumber,
EpisodeInfoModel.episodeNumber
)
if 'season' in args and len(args['season']) > 1
else query.order_by(EpisodeInfoModel.episodeNumber)
)
return query
def resolve_totalSeasons(self, info):
return(
EpisodeInfoModel
.query
.with_entities(EpisodeInfoModel.seasonNumber)
.filter_by(seriesID=self.imdbID)
.group_by(EpisodeInfoModel.seasonNumber)
.count()
)
class Name(SQLAlchemyObjectType):
class Meta:
model = NameModel
interfaces = (graphene.relay.Node, )
exclude_fields = ('id', )
imdbID = graphene.String()
birthYear = graphene.Int()
deathYear = graphene.Int()
primaryName = graphene.String()
primaryProfession = graphene.String()
knownForTitles = graphene.List(Title)
def resolve_knownForTitles(self, info):
query = (
TitleModel
.query
.filter(TitleModel.imdbID.in_(self.knownForTitles.split(',')))
) if self.knownForTitles is not None else None
return query
class Query(graphene.ObjectType):
title = graphene.Field(Title, imdbID=graphene.String(required=True))
movie = graphene.Field(Movie, imdbID=graphene.String(required=True))
series = graphene.Field(Series, imdbID=graphene.String(required=True))
episode = graphene.Field(Episode, imdbID=graphene.String(required=True))
titleSearch = graphene.Field(
graphene.List(Title),
title=graphene.String(required=True),
types=graphene.List(TitleType),
result=graphene.Int(default_value=5)
)
name = graphene.Field(Name, imdbID=graphene.String(required=True))
rating = graphene.Field(Rating, imdbID=graphene.String(required=True))
nameSearch = graphene.Field(
graphene.List(Name),
name=graphene.String(required=True),
result=graphene.Int(default_value=10)
)
def resolve_title(self, info, imdbID):
return TitleModel.query.filter_by(imdbID=imdbID).first()
def resolve_movie(self, info, imdbID):
return Movie.get_query(info).filter_by(imdbID=imdbID).first()
def resolve_series(self, info, imdbID):
return Series.get_query(info).filter_by(imdbID=imdbID).first()
def resolve_episode(self, info, imdbID):
return Episode.get_query(info).filter_by(imdbID=imdbID).first()
def resolve_titleSearch(self, info, title, types=None, result=None):
tsquery = func.to_tsquery(f'\'{title}\'')
query = (
TitleModel
.query
.filter(TitleModel.titleSearchCol.op('@@')(tsquery))
)
query = (
query.filter(TitleModel._type.in_(types))
if types is not None else query
)
query = (
query
.join(TitleModel.rating)
.order_by(
desc(RatingModel.numVotes >= 1000),
desc(TitleModel.primaryTitle.ilike(title)),
desc(RatingModel.numVotes),
desc(func.ts_rank_cd(TitleModel.titleSearchCol, tsquery, 1))
)
.limit(result)
)
return query
def resolve_name(self, info, imdbID):
return NameModel.query.filter_by(imdbID=imdbID).first()
def resolve_rating(self, info, imdbID):
return RatingModel.query.filter_by(imdbID=imdbID).first()
def resolve_nameSearch(self, info, name, result=None):
query = (
NameModel
.query
.filter(NameModel.primaryName.ilike(f'%{name}%'))
.order_by(
NameModel.primaryName,
NameModel.birthYear
)
.limit(result)
)
return query
schema = graphene.Schema(query=Query, types=[Movie, Series, Episode, Name])
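# Illustrative usage sketch (an assumption, not part of the original module): once
# this schema is served over HTTP, a client could search titles with a query like
#
#   query {
#     titleSearch(title: "matrix", result: 3) {
#       imdbID
#       primaryTitle
#       averageRating
#     }
#   }
#
# The optional `types` argument narrows results to values of the TitleType enum
# defined in models.py, and the Title interface lets one selection set cover
# movies, series and episodes alike.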
``` |
{
"source": "1-MillionParanoidTterabytes/Blender-2.79b-blackened",
"score": 3
} |
#### File: python_api/examples/bpy.props.5.py
```python
import bpy
# Simple property reading/writing from ID properties.
# This is what the RNA would do internally.
def get_float(self):
return self["testprop"]
def set_float(self, value):
self["testprop"] = value
bpy.types.Scene.test_float = bpy.props.FloatProperty(get=get_float, set=set_float)
# Read-only string property, returns the current date
def get_date(self):
import datetime
return str(datetime.datetime.now())
bpy.types.Scene.test_date = bpy.props.StringProperty(get=get_date)
# Boolean array. Set function stores a single boolean value, returned as the second component.
# Array getters must return a list or tuple
# Array size must match the property vector size exactly
def get_array(self):
return (True, self["somebool"])
def set_array(self, values):
self["somebool"] = values[0] and values[1]
bpy.types.Scene.test_array = bpy.props.BoolVectorProperty(size=2, get=get_array, set=set_array)
# Enum property.
# Note: the getter/setter callback must use integer identifiers!
test_items = [
("RED", "Red", "", 1),
("GREEN", "Green", "", 2),
("BLUE", "Blue", "", 3),
("YELLOW", "Yellow", "", 4),
]
def get_enum(self):
import random
return random.randint(1, 4)
def set_enum(self, value):
print("setting value", value)
bpy.types.Scene.test_enum = bpy.props.EnumProperty(items=test_items, get=get_enum, set=set_enum)
# Testing
scene = bpy.context.scene
scene.test_float = 12.34
print(scene.test_float)
scene.test_array = (True, False)
print([x for x in scene.test_array])
# scene.test_date = "blah" # this would fail, property is read-only
print(scene.test_date)
scene.test_enum = 'BLUE'
print(scene.test_enum)
# >>> 12.34000015258789
# >>> [True, False]
# >>> 2013-01-05 16:33:52.135340
# >>> setting value 3
# >>> GREEN
```
#### File: python_api/examples/bpy.types.Operator.6.py
```python
import bpy
from bpy.props import EnumProperty
class SearchEnumOperator(bpy.types.Operator):
bl_idname = "object.search_enum_operator"
bl_label = "Search Enum Operator"
bl_property = "my_search"
my_search = EnumProperty(
name="My Search",
items=(
('FOO', "Foo", ""),
('BAR', "Bar", ""),
('BAZ', "Baz", ""),
),
)
def execute(self, context):
self.report({'INFO'}, "Selected:" + self.my_search)
return {'FINISHED'}
def invoke(self, context, event):
context.window_manager.invoke_search_popup(self)
return {'RUNNING_MODAL'}
bpy.utils.register_class(SearchEnumOperator)
# test call
bpy.ops.object.search_enum_operator('INVOKE_DEFAULT')
```
#### File: release/datafiles/blender_icons_update.py
```python
import os
import sys
def run(cmd):
print(" ", cmd)
os.system(cmd)
BASEDIR = os.path.abspath(os.path.dirname(__file__)) + os.sep
inkscape_path = 'inkscape'
if sys.platform == 'darwin':
inkscape_app_path = '/Applications/Inkscape.app/Contents/Resources/script'
if os.path.exists(inkscape_app_path):
inkscape_path = inkscape_app_path
cmd = inkscape_path + ' "%sblender_icons.svg" --export-dpi=90 --without-gui --export-png="%sblender_icons16.png"' % (BASEDIR, BASEDIR)
run(cmd)
cmd = inkscape_path + ' "%sblender_icons.svg" --export-dpi=180 --without-gui --export-png="%sblender_icons32.png"' % (BASEDIR, BASEDIR)
run(cmd)
# For testing it can be good to clear all old
# rm ./blender_icons16/*.dat
# rm ./blender_icons32/*.dat
datatoc_icon_split_py = os.path.join(BASEDIR, "..", "..", "source", "blender", "datatoc", "datatoc_icon_split.py")
# create .dat pixmaps (which are stored in git)
cmd = (
"blender "
"--background -noaudio "
"--python " + datatoc_icon_split_py + " -- "
"--image=" + BASEDIR + "blender_icons16.png "
"--output=" + BASEDIR + "blender_icons16 "
"--output_prefix=icon16_ "
"--name_style=UI_ICONS "
"--parts_x 26 --parts_y 30 "
"--minx 3 --maxx 53 --miny 3 --maxy 8 "
"--minx_icon 2 --maxx_icon 2 --miny_icon 2 --maxy_icon 2 "
"--spacex_icon 1 --spacey_icon 1"
)
run(cmd)
cmd = (
"blender "
"--background -noaudio "
"--python " + datatoc_icon_split_py + " -- "
"--image=" + BASEDIR + "blender_icons32.png "
"--output=" + BASEDIR + "blender_icons32 "
"--output_prefix=icon32_ "
"--name_style=UI_ICONS "
"--parts_x 26 --parts_y 30 "
"--minx 6 --maxx 106 --miny 6 --maxy 16 "
"--minx_icon 4 --maxx_icon 4 --miny_icon 4 --maxy_icon 4 "
"--spacex_icon 2 --spacey_icon 2"
)
run(cmd)
os.remove(BASEDIR + "blender_icons16.png")
os.remove(BASEDIR + "blender_icons32.png")
# For testing, if we want the PNG of each image
# ./datatoc_icon_split_to_png.py ./blender_icons16/*.dat
# ./datatoc_icon_split_to_png.py ./blender_icons32/*.dat
```
#### File: addons/space_view3d_pie_menus/pie_sculpt_menu.py
```python
bl_info = {
"name": "Hotkey: 'W'",
"description": "Sculpt Brush Menu",
"author": "pitiwazou, meta-androcto",
"version": (0, 1, 0),
"blender": (2, 77, 0),
"location": "W key",
"warning": "",
"wiki_url": "",
"category": "Sculpt Pie"
}
import bpy
from bpy.types import (
Menu,
Operator,
)
# Sculpt Draw
class SculptSculptDraw(Operator):
bl_idname = "sculpt.sculptraw"
bl_label = "Sculpt SculptDraw"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
context.tool_settings.sculpt.brush = bpy.data.brushes['SculptDraw']
return {'FINISHED'}
# Pie Sculp Pie Menus - W
class PieSculptPie(Menu):
bl_idname = "pie.sculpt"
bl_label = "Pie Sculpt"
def draw(self, context):
layout = self.layout
pie = layout.menu_pie()
# 4 - LEFT
pie.operator("paint.brush_select",
text="Crease", icon='BRUSH_CREASE').sculpt_tool = 'CREASE'
# 6 - RIGHT
pie.operator("paint.brush_select",
text='Blob', icon='BRUSH_BLOB').sculpt_tool = 'BLOB'
# 2 - BOTTOM
pie.menu(PieSculpttwo.bl_idname,
text="More Brushes", icon='BRUSH_SMOOTH')
# 8 - TOP
pie.operator("sculpt.sculptraw",
text='SculptDraw', icon='BRUSH_SCULPT_DRAW')
# 7 - TOP - LEFT
pie.operator("paint.brush_select",
text="Clay", icon='BRUSH_CLAY').sculpt_tool = 'CLAY'
# 9 - TOP - RIGHT
pie.operator("paint.brush_select",
text='Claystrips', icon='BRUSH_CLAY_STRIPS').sculpt_tool = 'CLAY_STRIPS'
# 1 - BOTTOM - LEFT
pie.operator("paint.brush_select",
text='Inflate/Deflate', icon='BRUSH_INFLATE').sculpt_tool = 'INFLATE'
# 3 - BOTTOM - RIGHT
pie.menu(PieSculptthree.bl_idname,
text="Grab Brushes", icon='BRUSH_GRAB')
# Pie Sculpt 2
class PieSculpttwo(Menu):
bl_idname = "pie.sculpttwo"
bl_label = "Pie Sculpt 2"
def draw(self, context):
layout = self.layout
layout.operator("paint.brush_select", text='Smooth',
icon='BRUSH_SMOOTH').sculpt_tool = 'SMOOTH'
layout.operator("paint.brush_select", text='Flatten',
icon='BRUSH_FLATTEN').sculpt_tool = 'FLATTEN'
layout.operator("paint.brush_select", text='Scrape/Peaks',
icon='BRUSH_SCRAPE').sculpt_tool = 'SCRAPE'
layout.operator("paint.brush_select", text='Fill/Deepen',
icon='BRUSH_FILL').sculpt_tool = 'FILL'
layout.operator("paint.brush_select", text='Pinch/Magnify',
icon='BRUSH_PINCH').sculpt_tool = 'PINCH'
layout.operator("paint.brush_select", text='Layer',
icon='BRUSH_LAYER').sculpt_tool = 'LAYER'
layout.operator("paint.brush_select", text='Mask',
icon='BRUSH_MASK').sculpt_tool = 'MASK'
# Pie Sculpt Three
class PieSculptthree(Menu):
bl_idname = "pie.sculptthree"
bl_label = "Pie Sculpt 3"
def draw(self, context):
layout = self.layout
layout.operator("paint.brush_select",
text='Grab', icon='BRUSH_GRAB').sculpt_tool = 'GRAB'
layout.operator("paint.brush_select",
text='Nudge', icon='BRUSH_NUDGE').sculpt_tool = 'NUDGE'
layout.operator("paint.brush_select",
text='Thumb', icon='BRUSH_THUMB').sculpt_tool = 'THUMB'
layout.operator("paint.brush_select",
text='Snakehook', icon='BRUSH_SNAKE_HOOK').sculpt_tool = 'SNAKE_HOOK'
layout.operator("paint.brush_select",
text='Twist', icon='BRUSH_ROTATE').sculpt_tool = 'ROTATE'
classes = (
PieSculptPie,
PieSculpttwo,
PieSculptthree,
SculptSculptDraw,
)
addon_keymaps = []
def register():
for cls in classes:
bpy.utils.register_class(cls)
wm = bpy.context.window_manager
if wm.keyconfigs.addon:
# Sculpt Pie Menu
km = wm.keyconfigs.addon.keymaps.new(name='Sculpt')
kmi = km.keymap_items.new('wm.call_menu_pie', 'W', 'PRESS')
kmi.properties.name = "pie.sculpt"
addon_keymaps.append((km, kmi))
def unregister():
for cls in classes:
bpy.utils.unregister_class(cls)
wm = bpy.context.window_manager
kc = wm.keyconfigs.addon
if kc:
for km, kmi in addon_keymaps:
km.keymap_items.remove(kmi)
addon_keymaps.clear()
if __name__ == "__main__":
register()
```
#### File: startup/bl_operators/presets.py
```python
import bpy
from bpy.types import Menu, Operator
from bpy.props import StringProperty, BoolProperty
class AddPresetBase:
"""Base preset class, only for subclassing
subclasses must define
- preset_values
- preset_subdir """
# bl_idname = "script.preset_base_add"
# bl_label = "Add a Python Preset"
# only because invoke_props_popup requires. Also do not add to search menu.
bl_options = {'REGISTER', 'INTERNAL'}
name = StringProperty(
name="Name",
description="Name of the preset, used to make the path name",
maxlen=64,
options={'SKIP_SAVE'},
)
remove_active = BoolProperty(
default=False,
options={'HIDDEN', 'SKIP_SAVE'},
)
# needed for mix-ins
order = [
"name",
"remove_active",
]
@staticmethod
def as_filename(name): # could reuse for other presets
# lazy init maketrans
def maketrans_init():
cls = AddPresetBase
attr = "_as_filename_trans"
trans = getattr(cls, attr, None)
if trans is None:
trans = str.maketrans({char: "_" for char in " !@#$%^&*(){}:\";'[]<>,.\\/?"})
setattr(cls, attr, trans)
return trans
name = name.lower().strip()
name = bpy.path.display_name_to_filepath(name)
trans = maketrans_init()
return name.translate(trans)
def execute(self, context):
import os
if hasattr(self, "pre_cb"):
self.pre_cb(context)
preset_menu_class = getattr(bpy.types, self.preset_menu)
is_xml = getattr(preset_menu_class, "preset_type", None) == 'XML'
if is_xml:
ext = ".xml"
else:
ext = ".py"
if not self.remove_active:
name = self.name.strip()
if not name:
return {'FINISHED'}
filename = self.as_filename(name)
target_path = os.path.join("presets", self.preset_subdir)
target_path = bpy.utils.user_resource('SCRIPTS',
target_path,
create=True)
if not target_path:
self.report({'WARNING'}, "Failed to create presets path")
return {'CANCELLED'}
filepath = os.path.join(target_path, filename) + ext
if hasattr(self, "add"):
self.add(context, filepath)
else:
print("Writing Preset: %r" % filepath)
if is_xml:
import rna_xml
rna_xml.xml_file_write(context,
filepath,
preset_menu_class.preset_xml_map)
else:
def rna_recursive_attr_expand(value, rna_path_step, level):
if isinstance(value, bpy.types.PropertyGroup):
for sub_value_attr in value.bl_rna.properties.keys():
if sub_value_attr == "rna_type":
continue
sub_value = getattr(value, sub_value_attr)
rna_recursive_attr_expand(sub_value, "%s.%s" % (rna_path_step, sub_value_attr), level)
elif type(value).__name__ == "bpy_prop_collection_idprop": # could use nicer method
file_preset.write("%s.clear()\n" % rna_path_step)
for sub_value in value:
file_preset.write("item_sub_%d = %s.add()\n" % (level, rna_path_step))
rna_recursive_attr_expand(sub_value, "item_sub_%d" % level, level + 1)
else:
# convert thin wrapped sequences
# to simple lists to repr()
try:
value = value[:]
except:
pass
file_preset.write("%s = %r\n" % (rna_path_step, value))
file_preset = open(filepath, 'w', encoding="utf-8")
file_preset.write("import bpy\n")
if hasattr(self, "preset_defines"):
for rna_path in self.preset_defines:
exec(rna_path)
file_preset.write("%s\n" % rna_path)
file_preset.write("\n")
for rna_path in self.preset_values:
value = eval(rna_path)
rna_recursive_attr_expand(value, rna_path, 1)
file_preset.close()
preset_menu_class.bl_label = bpy.path.display_name(filename)
else:
preset_active = preset_menu_class.bl_label
# fairly sloppy but convenient.
filepath = bpy.utils.preset_find(preset_active,
self.preset_subdir,
ext=ext)
if not filepath:
filepath = bpy.utils.preset_find(preset_active,
self.preset_subdir,
display_name=True,
ext=ext)
if not filepath:
return {'CANCELLED'}
try:
if hasattr(self, "remove"):
self.remove(context, filepath)
else:
os.remove(filepath)
except Exception as e:
self.report({'ERROR'}, "Unable to remove preset: %r" % e)
import traceback
traceback.print_exc()
return {'CANCELLED'}
# XXX, stupid!
preset_menu_class.bl_label = "Presets"
if hasattr(self, "post_cb"):
self.post_cb(context)
return {'FINISHED'}
def check(self, context):
self.name = self.as_filename(self.name.strip())
def invoke(self, context, event):
if not self.remove_active:
wm = context.window_manager
return wm.invoke_props_dialog(self)
else:
return self.execute(context)
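# Illustrative sketch (example values only, not from the original file): for a render
# preset, the writer in execute() above emits a small Python script that ExecutePreset
# later runs to restore the stored settings, roughly:
#
#   import bpy
#   scene = bpy.context.scene
#
#   scene.render.resolution_x = 1920
#   scene.render.resolution_y = 1080
#   scene.render.fps = 24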
class ExecutePreset(Operator):
"""Execute a preset"""
bl_idname = "script.execute_preset"
bl_label = "Execute a Python Preset"
filepath = StringProperty(
subtype='FILE_PATH',
options={'SKIP_SAVE'},
)
menu_idname = StringProperty(
name="Menu ID Name",
description="ID name of the menu this was called from",
options={'SKIP_SAVE'},
)
def execute(self, context):
from os.path import basename, splitext
filepath = self.filepath
# change the menu title to the most recently chosen option
preset_class = getattr(bpy.types, self.menu_idname)
preset_class.bl_label = bpy.path.display_name(basename(filepath))
ext = splitext(filepath)[1].lower()
# execute the preset using script.python_file_run
if ext == ".py":
bpy.ops.script.python_file_run(filepath=filepath)
elif ext == ".xml":
import rna_xml
rna_xml.xml_file_run(context,
filepath,
preset_class.preset_xml_map)
else:
self.report({'ERROR'}, "unknown filetype: %r" % ext)
return {'CANCELLED'}
return {'FINISHED'}
class AddPresetRender(AddPresetBase, Operator):
"""Add or remove a Render Preset"""
bl_idname = "render.preset_add"
bl_label = "Add Render Preset"
preset_menu = "RENDER_MT_presets"
preset_defines = [
"scene = bpy.context.scene"
]
preset_values = [
"scene.render.field_order",
"scene.render.fps",
"scene.render.fps_base",
"scene.render.pixel_aspect_x",
"scene.render.pixel_aspect_y",
"scene.render.resolution_percentage",
"scene.render.resolution_x",
"scene.render.resolution_y",
"scene.render.use_fields",
"scene.render.use_fields_still",
]
preset_subdir = "render"
class AddPresetCamera(AddPresetBase, Operator):
"""Add or remove a Camera Preset"""
bl_idname = "camera.preset_add"
bl_label = "Add Camera Preset"
preset_menu = "CAMERA_MT_presets"
preset_defines = [
"cam = bpy.context.camera"
]
preset_subdir = "camera"
use_focal_length = BoolProperty(
name="Include Focal Length",
description="Include focal length into the preset",
options={'SKIP_SAVE'},
)
@property
def preset_values(self):
preset_values = [
"cam.sensor_width",
"cam.sensor_height",
"cam.sensor_fit"
]
if self.use_focal_length:
preset_values.append("cam.lens")
preset_values.append("cam.lens_unit")
return preset_values
class AddPresetSafeAreas(AddPresetBase, Operator):
"""Add or remove a Safe Areas Preset"""
bl_idname = "safe_areas.preset_add"
bl_label = "Add Safe Area Preset"
preset_menu = "SAFE_AREAS_MT_presets"
preset_defines = [
"safe_areas = bpy.context.scene.safe_areas"
]
preset_values = [
"safe_areas.title",
"safe_areas.action",
"safe_areas.title_center",
"safe_areas.action_center",
]
preset_subdir = "safe_areas"
class AddPresetSSS(AddPresetBase, Operator):
"""Add or remove a Subsurface Scattering Preset"""
bl_idname = "material.sss_preset_add"
bl_label = "Add SSS Preset"
preset_menu = "MATERIAL_MT_sss_presets"
preset_defines = [
("material = "
"bpy.context.material.active_node_material "
"if bpy.context.material.active_node_material "
"else bpy.context.material")
]
preset_values = [
"material.subsurface_scattering.back",
"material.subsurface_scattering.color",
"material.subsurface_scattering.color_factor",
"material.subsurface_scattering.error_threshold",
"material.subsurface_scattering.front",
"material.subsurface_scattering.ior",
"material.subsurface_scattering.radius",
"material.subsurface_scattering.scale",
"material.subsurface_scattering.texture_factor",
]
preset_subdir = "sss"
class AddPresetCloth(AddPresetBase, Operator):
"""Add or remove a Cloth Preset"""
bl_idname = "cloth.preset_add"
bl_label = "Add Cloth Preset"
preset_menu = "CLOTH_MT_presets"
preset_defines = [
"cloth = bpy.context.cloth"
]
preset_values = [
"cloth.settings.air_damping",
"cloth.settings.bending_stiffness",
"cloth.settings.mass",
"cloth.settings.quality",
"cloth.settings.spring_damping",
"cloth.settings.structural_stiffness",
]
preset_subdir = "cloth"
class AddPresetFluid(AddPresetBase, Operator):
"""Add or remove a Fluid Preset"""
bl_idname = "fluid.preset_add"
bl_label = "Add Fluid Preset"
preset_menu = "FLUID_MT_presets"
preset_defines = [
"fluid = bpy.context.fluid"
]
preset_values = [
"fluid.settings.viscosity_base",
"fluid.settings.viscosity_exponent",
]
preset_subdir = "fluid"
class AddPresetHairDynamics(AddPresetBase, Operator):
"""Add or remove a Hair Dynamics Preset"""
bl_idname = "particle.hair_dynamics_preset_add"
bl_label = "Add Hair Dynamics Preset"
preset_menu = "PARTICLE_MT_hair_dynamics_presets"
preset_defines = [
"psys = bpy.context.particle_system",
"cloth = bpy.context.particle_system.cloth",
"settings = bpy.context.particle_system.cloth.settings",
"collision = bpy.context.particle_system.cloth.collision_settings",
]
preset_subdir = "hair_dynamics"
preset_values = [
"settings.quality",
"settings.mass",
"settings.bending_stiffness",
"psys.settings.bending_random",
"settings.bending_damping",
"settings.air_damping",
"settings.internal_friction",
"settings.density_target",
"settings.density_strength",
"settings.voxel_cell_size",
"settings.pin_stiffness",
]
class AddPresetSunSky(AddPresetBase, Operator):
"""Add or remove a Sky & Atmosphere Preset"""
bl_idname = "lamp.sunsky_preset_add"
bl_label = "Add Sunsky Preset"
preset_menu = "LAMP_MT_sunsky_presets"
preset_defines = [
"sky = bpy.context.lamp.sky"
]
preset_values = [
"sky.atmosphere_extinction",
"sky.atmosphere_inscattering",
"sky.atmosphere_turbidity",
"sky.backscattered_light",
"sky.horizon_brightness",
"sky.spread",
"sky.sun_brightness",
"sky.sun_intensity",
"sky.sun_size",
"sky.sky_blend",
"sky.sky_blend_type",
"sky.sky_color_space",
"sky.sky_exposure",
]
preset_subdir = "sunsky"
class AddPresetInteraction(AddPresetBase, Operator):
"""Add or remove an Application Interaction Preset"""
bl_idname = "wm.interaction_preset_add"
bl_label = "Add Interaction Preset"
preset_menu = "USERPREF_MT_interaction_presets"
preset_defines = [
"user_preferences = bpy.context.user_preferences"
]
preset_values = [
"user_preferences.edit.use_drag_immediately",
"user_preferences.edit.use_insertkey_xyz_to_rgb",
"user_preferences.inputs.invert_mouse_zoom",
"user_preferences.inputs.select_mouse",
"user_preferences.inputs.use_emulate_numpad",
"user_preferences.inputs.use_mouse_continuous",
"user_preferences.inputs.use_mouse_emulate_3_button",
"user_preferences.inputs.view_rotate_method",
"user_preferences.inputs.view_zoom_axis",
"user_preferences.inputs.view_zoom_method",
]
preset_subdir = "interaction"
class AddPresetTrackingCamera(AddPresetBase, Operator):
"""Add or remove a Tracking Camera Intrinsics Preset"""
bl_idname = "clip.camera_preset_add"
bl_label = "Add Camera Preset"
preset_menu = "CLIP_MT_camera_presets"
preset_defines = [
"camera = bpy.context.edit_movieclip.tracking.camera"
]
preset_subdir = "tracking_camera"
use_focal_length = BoolProperty(
name="Include Focal Length",
description="Include focal length into the preset",
options={'SKIP_SAVE'},
default=True
)
@property
def preset_values(self):
preset_values = [
"camera.sensor_width",
"camera.pixel_aspect",
"camera.k1",
"camera.k2",
"camera.k3"
]
if self.use_focal_length:
preset_values.append("camera.units")
preset_values.append("camera.focal_length")
return preset_values
class AddPresetTrackingTrackColor(AddPresetBase, Operator):
"""Add or remove a Clip Track Color Preset"""
bl_idname = "clip.track_color_preset_add"
bl_label = "Add Track Color Preset"
preset_menu = "CLIP_MT_track_color_presets"
preset_defines = [
"track = bpy.context.edit_movieclip.tracking.tracks.active"
]
preset_values = [
"track.color",
"track.use_custom_color"
]
preset_subdir = "tracking_track_color"
class AddPresetTrackingSettings(AddPresetBase, Operator):
"""Add or remove a motion tracking settings preset"""
bl_idname = "clip.tracking_settings_preset_add"
bl_label = "Add Tracking Settings Preset"
preset_menu = "CLIP_MT_tracking_settings_presets"
preset_defines = [
"settings = bpy.context.edit_movieclip.tracking.settings"
]
preset_values = [
"settings.default_correlation_min",
"settings.default_pattern_size",
"settings.default_search_size",
"settings.default_frames_limit",
"settings.default_pattern_match",
"settings.default_margin",
"settings.default_motion_model",
"settings.use_default_brute",
"settings.use_default_normalization",
"settings.use_default_mask",
"settings.use_default_red_channel",
"settings.use_default_green_channel",
"settings.use_default_blue_channel"
"settings.default_weight"
]
preset_subdir = "tracking_settings"
class AddPresetNodeColor(AddPresetBase, Operator):
"""Add or remove a Node Color Preset"""
bl_idname = "node.node_color_preset_add"
bl_label = "Add Node Color Preset"
preset_menu = "NODE_MT_node_color_presets"
preset_defines = [
"node = bpy.context.active_node"
]
preset_values = [
"node.color",
"node.use_custom_color"
]
preset_subdir = "node_color"
class AddPresetInterfaceTheme(AddPresetBase, Operator):
"""Add or remove a theme preset"""
bl_idname = "wm.interface_theme_preset_add"
bl_label = "Add Theme Preset"
preset_menu = "USERPREF_MT_interface_theme_presets"
preset_subdir = "interface_theme"
class AddPresetKeyconfig(AddPresetBase, Operator):
"""Add or remove a Key-config Preset"""
bl_idname = "wm.keyconfig_preset_add"
bl_label = "Add Keyconfig Preset"
preset_menu = "USERPREF_MT_keyconfigs"
preset_subdir = "keyconfig"
def add(self, context, filepath):
bpy.ops.wm.keyconfig_export(filepath=filepath)
bpy.utils.keyconfig_set(filepath)
def pre_cb(self, context):
keyconfigs = bpy.context.window_manager.keyconfigs
if self.remove_active:
preset_menu_class = getattr(bpy.types, self.preset_menu)
preset_menu_class.bl_label = keyconfigs.active.name
def post_cb(self, context):
keyconfigs = bpy.context.window_manager.keyconfigs
if self.remove_active:
keyconfigs.remove(keyconfigs.active)
class AddPresetOperator(AddPresetBase, Operator):
"""Add or remove an Operator Preset"""
bl_idname = "wm.operator_preset_add"
bl_label = "Operator Preset"
preset_menu = "WM_MT_operator_presets"
operator = StringProperty(
name="Operator",
maxlen=64,
options={'HIDDEN', 'SKIP_SAVE'},
)
preset_defines = [
"op = bpy.context.active_operator",
]
@property
def preset_subdir(self):
return AddPresetOperator.operator_path(self.operator)
@property
def preset_values(self):
properties_blacklist = Operator.bl_rna.properties.keys()
prefix, suffix = self.operator.split("_OT_", 1)
op = getattr(getattr(bpy.ops, prefix.lower()), suffix)
operator_rna = op.get_rna().bl_rna
del op
ret = []
for prop_id, prop in operator_rna.properties.items():
if not (prop.is_hidden or prop.is_skip_save):
if prop_id not in properties_blacklist:
ret.append("op.%s" % prop_id)
return ret
@staticmethod
def operator_path(operator):
import os
prefix, suffix = operator.split("_OT_", 1)
return os.path.join("operator", "%s.%s" % (prefix.lower(), suffix))
class WM_MT_operator_presets(Menu):
bl_label = "Operator Presets"
def draw(self, context):
self.operator = context.active_operator.bl_idname
# dummy 'default' menu item
layout = self.layout
layout.operator("wm.operator_defaults")
layout.separator()
Menu.draw_preset(self, context)
@property
def preset_subdir(self):
return AddPresetOperator.operator_path(self.operator)
preset_operator = "script.execute_preset"
class AddPresetUnitsLength(AddPresetBase, Operator):
"""Add or remove length units preset"""
bl_idname = "scene.units_length_preset_add"
bl_label = "Add Length Units Preset"
preset_menu = "SCENE_MT_units_length_presets"
preset_defines = [
"scene = bpy.context.scene"
]
preset_values = [
"scene.unit_settings.system",
"scene.unit_settings.scale_length",
]
preset_subdir = "units_length"
classes = (
AddPresetCamera,
AddPresetCloth,
AddPresetFluid,
AddPresetHairDynamics,
AddPresetInteraction,
AddPresetInterfaceTheme,
AddPresetKeyconfig,
AddPresetNodeColor,
AddPresetOperator,
AddPresetRender,
AddPresetSSS,
AddPresetSafeAreas,
AddPresetSunSky,
AddPresetTrackingCamera,
AddPresetTrackingSettings,
AddPresetTrackingTrackColor,
AddPresetUnitsLength,
ExecutePreset,
WM_MT_operator_presets,
)
```
#### File: startup/bl_operators/wm.py
```python
import bpy
from bpy.types import Operator
from bpy.props import (
StringProperty,
BoolProperty,
IntProperty,
FloatProperty,
EnumProperty,
)
from bpy.app.translations import pgettext_tip as tip_
rna_path_prop = StringProperty(
name="Context Attributes",
description="RNA context string",
maxlen=1024,
)
rna_reverse_prop = BoolProperty(
name="Reverse",
description="Cycle backwards",
default=False,
)
rna_wrap_prop = BoolProperty(
name="Wrap",
description="Wrap back to the first/last values",
default=False,
)
rna_relative_prop = BoolProperty(
name="Relative",
description="Apply relative to the current value (delta)",
default=False,
)
def context_path_validate(context, data_path):
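    # Ellipsis is used as a sentinel meaning "the path could not be resolved",
    # since None is a valid value for many RNA properties.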
try:
value = eval("context.%s" % data_path) if data_path else Ellipsis
except AttributeError as e:
if str(e).startswith("'NoneType'"):
# One of the items in the rna path is None, just ignore this
value = Ellipsis
else:
# We have a real error in the rna path, don't ignore that
raise
return value
def operator_value_is_undo(value):
if value in {None, Ellipsis}:
return False
# typical properties or objects
id_data = getattr(value, "id_data", Ellipsis)
if id_data is None:
return False
elif id_data is Ellipsis:
# handle mathutils types
id_data = getattr(getattr(value, "owner", None), "id_data", None)
if id_data is None:
return False
    # return True if it's a non-window ID type
return (isinstance(id_data, bpy.types.ID) and
(not isinstance(id_data, (bpy.types.WindowManager,
bpy.types.Screen,
bpy.types.Brush,
))))
def operator_path_is_undo(context, data_path):
# note that if we have data paths that use strings this could fail
# luckily we don't do this!
#
    # When we can't find the data owner, assume no undo is needed.
data_path_head = data_path.rpartition(".")[0]
if not data_path_head:
return False
value = context_path_validate(context, data_path_head)
return operator_value_is_undo(value)
def operator_path_undo_return(context, data_path):
return {'FINISHED'} if operator_path_is_undo(context, data_path) else {'CANCELLED'}
def operator_value_undo_return(value):
return {'FINISHED'} if operator_value_is_undo(value) else {'CANCELLED'}
def execute_context_assign(self, context):
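    # shared execute() used by the WM_OT_context_set_* operators below;
    # it honors the optional 'relative' property when the operator defines one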
data_path = self.data_path
if context_path_validate(context, data_path) is Ellipsis:
return {'PASS_THROUGH'}
if getattr(self, "relative", False):
exec("context.%s += self.value" % data_path)
else:
exec("context.%s = self.value" % data_path)
return operator_path_undo_return(context, data_path)
def module_filesystem_remove(path_base, module_name):
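    # remove any file or directory in 'path_base' whose name (minus extension)
    # matches 'module_name' - used when (re)installing add-ons and templates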
import os
module_name = os.path.splitext(module_name)[0]
for f in os.listdir(path_base):
f_base = os.path.splitext(f)[0]
if f_base == module_name:
f_full = os.path.join(path_base, f)
if os.path.isdir(f_full):
os.rmdir(f_full)
else:
os.remove(f_full)
class BRUSH_OT_active_index_set(Operator):
"""Set active sculpt/paint brush from it's number"""
bl_idname = "brush.active_index_set"
bl_label = "Set Brush Number"
mode = StringProperty(
name="Mode",
description="Paint mode to set brush for",
maxlen=1024,
)
index = IntProperty(
name="Number",
description="Brush number",
)
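    # map a paint-mode identifier to the Brush flag used to filter brushes for that mode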
_attr_dict = {"sculpt": "use_paint_sculpt",
"vertex_paint": "use_paint_vertex",
"weight_paint": "use_paint_weight",
"image_paint": "use_paint_image",
}
def execute(self, context):
attr = self._attr_dict.get(self.mode)
if attr is None:
return {'CANCELLED'}
toolsettings = context.tool_settings
for i, brush in enumerate((cur for cur in bpy.data.brushes if getattr(cur, attr))):
if i == self.index:
getattr(toolsettings, self.mode).brush = brush
return {'FINISHED'}
return {'CANCELLED'}
class WM_OT_context_set_boolean(Operator):
"""Set a context value"""
bl_idname = "wm.context_set_boolean"
bl_label = "Context Set Boolean"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = BoolProperty(
name="Value",
description="Assignment value",
default=True,
)
execute = execute_context_assign
class WM_OT_context_set_int(Operator): # same as enum
"""Set a context value"""
bl_idname = "wm.context_set_int"
bl_label = "Context Set"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = IntProperty(
name="Value",
description="Assign value",
default=0,
)
relative = rna_relative_prop
execute = execute_context_assign
class WM_OT_context_scale_float(Operator):
"""Scale a float context value"""
bl_idname = "wm.context_scale_float"
bl_label = "Context Scale Float"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = FloatProperty(
name="Value",
description="Assign value",
default=1.0,
)
def execute(self, context):
data_path = self.data_path
if context_path_validate(context, data_path) is Ellipsis:
return {'PASS_THROUGH'}
value = self.value
if value == 1.0: # nothing to do
return {'CANCELLED'}
exec("context.%s *= value" % data_path)
return operator_path_undo_return(context, data_path)
class WM_OT_context_scale_int(Operator):
"""Scale an int context value"""
bl_idname = "wm.context_scale_int"
bl_label = "Context Scale Int"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = FloatProperty(
name="Value",
description="Assign value",
default=1.0,
)
always_step = BoolProperty(
name="Always Step",
description="Always adjust the value by a minimum of 1 when 'value' is not 1.0",
default=True,
)
def execute(self, context):
data_path = self.data_path
if context_path_validate(context, data_path) is Ellipsis:
return {'PASS_THROUGH'}
value = self.value
if value == 1.0: # nothing to do
return {'CANCELLED'}
if getattr(self, "always_step", False):
if value > 1.0:
add = "1"
func = "max"
else:
add = "-1"
func = "min"
exec("context.%s = %s(round(context.%s * value), context.%s + %s)" %
(data_path, func, data_path, data_path, add))
else:
exec("context.%s *= value" % data_path)
return operator_path_undo_return(context, data_path)
class WM_OT_context_set_float(Operator): # same as enum
"""Set a context value"""
bl_idname = "wm.context_set_float"
bl_label = "Context Set Float"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = FloatProperty(
name="Value",
description="Assignment value",
default=0.0,
)
relative = rna_relative_prop
execute = execute_context_assign
class WM_OT_context_set_string(Operator): # same as enum
"""Set a context value"""
bl_idname = "wm.context_set_string"
bl_label = "Context Set String"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = StringProperty(
name="Value",
description="Assign value",
maxlen=1024,
)
execute = execute_context_assign
class WM_OT_context_set_enum(Operator):
"""Set a context value"""
bl_idname = "wm.context_set_enum"
bl_label = "Context Set Enum"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = StringProperty(
name="Value",
description="Assignment value (as a string)",
maxlen=1024,
)
execute = execute_context_assign
class WM_OT_context_set_value(Operator):
"""Set a context value"""
bl_idname = "wm.context_set_value"
bl_label = "Context Set Value"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = StringProperty(
name="Value",
description="Assignment value (as a string)",
maxlen=1024,
)
def execute(self, context):
data_path = self.data_path
if context_path_validate(context, data_path) is Ellipsis:
return {'PASS_THROUGH'}
exec("context.%s = %s" % (data_path, self.value))
return operator_path_undo_return(context, data_path)
class WM_OT_context_toggle(Operator):
"""Toggle a context value"""
bl_idname = "wm.context_toggle"
bl_label = "Context Toggle"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
def execute(self, context):
data_path = self.data_path
if context_path_validate(context, data_path) is Ellipsis:
return {'PASS_THROUGH'}
exec("context.%s = not (context.%s)" % (data_path, data_path))
return operator_path_undo_return(context, data_path)
class WM_OT_context_toggle_enum(Operator):
"""Toggle a context value"""
bl_idname = "wm.context_toggle_enum"
bl_label = "Context Toggle Values"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value_1 = StringProperty(
name="Value",
description="Toggle enum",
maxlen=1024,
)
value_2 = StringProperty(
name="Value",
description="Toggle enum",
maxlen=1024,
)
def execute(self, context):
data_path = self.data_path
if context_path_validate(context, data_path) is Ellipsis:
return {'PASS_THROUGH'}
        # failing silently is not ideal, but we don't want errors for shortcut
        # keys whose values are only available in a particular context
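        # (the exec below indexes a 2-tuple with a bool: if the current value
        # equals value_2 it switches to value_1, otherwise to value_2)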
try:
exec("context.%s = ('%s', '%s')[context.%s != '%s']" %
(data_path, self.value_1,
self.value_2, data_path,
self.value_2,
))
except:
return {'PASS_THROUGH'}
return operator_path_undo_return(context, data_path)
class WM_OT_context_cycle_int(Operator):
"""Set a context value (useful for cycling active material, """ \
"""vertex keys, groups, etc.)"""
bl_idname = "wm.context_cycle_int"
bl_label = "Context Int Cycle"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
reverse = rna_reverse_prop
wrap = rna_wrap_prop
def execute(self, context):
data_path = self.data_path
value = context_path_validate(context, data_path)
if value is Ellipsis:
return {'PASS_THROUGH'}
if self.reverse:
value -= 1
else:
value += 1
exec("context.%s = value" % data_path)
if self.wrap:
if value != eval("context.%s" % data_path):
# relies on rna clamping integers out of the range
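                # ((1 << 31) - 1 and -1 << 31 are the int32 extremes, so RNA
                # clamps the assignment back to the property's own max/min)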
if self.reverse:
value = (1 << 31) - 1
else:
value = -1 << 31
exec("context.%s = value" % data_path)
return operator_path_undo_return(context, data_path)
class WM_OT_context_cycle_enum(Operator):
"""Toggle a context value"""
bl_idname = "wm.context_cycle_enum"
bl_label = "Context Enum Cycle"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
reverse = rna_reverse_prop
wrap = rna_wrap_prop
def execute(self, context):
data_path = self.data_path
value = context_path_validate(context, data_path)
if value is Ellipsis:
return {'PASS_THROUGH'}
orig_value = value
# Have to get rna enum values
rna_struct_str, rna_prop_str = data_path.rsplit('.', 1)
i = rna_prop_str.find('[')
# just in case we get "context.foo.bar[0]"
if i != -1:
rna_prop_str = rna_prop_str[0:i]
rna_struct = eval("context.%s.rna_type" % rna_struct_str)
rna_prop = rna_struct.properties[rna_prop_str]
if type(rna_prop) != bpy.types.EnumProperty:
raise Exception("expected an enum property")
enums = rna_struct.properties[rna_prop_str].enum_items.keys()
orig_index = enums.index(orig_value)
# Have the info we need, advance to the next item.
#
        # When wrap is disabled we may set the value to itself,
# this is done to ensure update callbacks run.
if self.reverse:
if orig_index == 0:
advance_enum = enums[-1] if self.wrap else enums[0]
else:
advance_enum = enums[orig_index - 1]
else:
if orig_index == len(enums) - 1:
advance_enum = enums[0] if self.wrap else enums[-1]
else:
advance_enum = enums[orig_index + 1]
# set the new value
exec("context.%s = advance_enum" % data_path)
return operator_path_undo_return(context, data_path)
class WM_OT_context_cycle_array(Operator):
"""Set a context array value """ \
"""(useful for cycling the active mesh edit mode)"""
bl_idname = "wm.context_cycle_array"
bl_label = "Context Array Cycle"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
reverse = rna_reverse_prop
def execute(self, context):
data_path = self.data_path
value = context_path_validate(context, data_path)
if value is Ellipsis:
return {'PASS_THROUGH'}
def cycle(array):
if self.reverse:
array.insert(0, array.pop())
else:
array.append(array.pop(0))
return array
exec("context.%s = cycle(context.%s[:])" % (data_path, data_path))
return operator_path_undo_return(context, data_path)
class WM_OT_context_menu_enum(Operator):
bl_idname = "wm.context_menu_enum"
bl_label = "Context Enum Menu"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
def execute(self, context):
data_path = self.data_path
value = context_path_validate(context, data_path)
if value is Ellipsis:
return {'PASS_THROUGH'}
base_path, prop_string = data_path.rsplit(".", 1)
value_base = context_path_validate(context, base_path)
prop = value_base.bl_rna.properties[prop_string]
def draw_cb(self, context):
layout = self.layout
layout.prop(value_base, prop_string, expand=True)
context.window_manager.popup_menu(draw_func=draw_cb, title=prop.name, icon=prop.icon)
return {'FINISHED'}
class WM_OT_context_pie_enum(Operator):
bl_idname = "wm.context_pie_enum"
bl_label = "Context Enum Pie"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
def invoke(self, context, event):
wm = context.window_manager
data_path = self.data_path
value = context_path_validate(context, data_path)
if value is Ellipsis:
return {'PASS_THROUGH'}
base_path, prop_string = data_path.rsplit(".", 1)
value_base = context_path_validate(context, base_path)
prop = value_base.bl_rna.properties[prop_string]
def draw_cb(self, context):
layout = self.layout
layout.prop(value_base, prop_string, expand=True)
wm.popup_menu_pie(draw_func=draw_cb, title=prop.name, icon=prop.icon, event=event)
return {'FINISHED'}
class WM_OT_operator_pie_enum(Operator):
bl_idname = "wm.operator_pie_enum"
bl_label = "Operator Enum Pie"
bl_options = {'UNDO', 'INTERNAL'}
data_path = StringProperty(
name="Operator",
description="Operator name (in python as string)",
maxlen=1024,
)
prop_string = StringProperty(
name="Property",
description="Property name (as a string)",
maxlen=1024,
)
def invoke(self, context, event):
wm = context.window_manager
data_path = self.data_path
prop_string = self.prop_string
# same as eval("bpy.ops." + data_path)
op_mod_str, ob_id_str = data_path.split(".", 1)
op = getattr(getattr(bpy.ops, op_mod_str), ob_id_str)
del op_mod_str, ob_id_str
try:
op_rna = op.get_rna()
except KeyError:
self.report({'ERROR'}, "Operator not found: bpy.ops.%s" % data_path)
return {'CANCELLED'}
def draw_cb(self, context):
layout = self.layout
pie = layout.menu_pie()
pie.operator_enum(data_path, prop_string)
wm.popup_menu_pie(draw_func=draw_cb, title=op_rna.bl_rna.name, event=event)
return {'FINISHED'}
class WM_OT_context_set_id(Operator):
"""Set a context value to an ID data-block"""
bl_idname = "wm.context_set_id"
bl_label = "Set Library ID"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path_prop
value = StringProperty(
name="Value",
description="Assign value",
maxlen=1024,
)
def execute(self, context):
value = self.value
data_path = self.data_path
# match the pointer type from the target property to bpy.data.*
# so we lookup the correct list.
data_path_base, data_path_prop = data_path.rsplit(".", 1)
data_prop_rna = eval("context.%s" % data_path_base).rna_type.properties[data_path_prop]
data_prop_rna_type = data_prop_rna.fixed_type
id_iter = None
for prop in bpy.data.rna_type.properties:
if prop.rna_type.identifier == "CollectionProperty":
if prop.fixed_type == data_prop_rna_type:
id_iter = prop.identifier
break
if id_iter:
value_id = getattr(bpy.data, id_iter).get(value)
exec("context.%s = value_id" % data_path)
return operator_path_undo_return(context, data_path)
doc_id = StringProperty(
name="Doc ID",
maxlen=1024,
options={'HIDDEN'},
)
data_path_iter = StringProperty(
description="The data path relative to the context, must point to an iterable")
data_path_item = StringProperty(
description="The data path from each iterable to the value (int or float)")
class WM_OT_context_collection_boolean_set(Operator):
"""Set boolean values for a collection of items"""
bl_idname = "wm.context_collection_boolean_set"
bl_label = "Context Collection Boolean Set"
bl_options = {'UNDO', 'REGISTER', 'INTERNAL'}
data_path_iter = data_path_iter
data_path_item = data_path_item
type = EnumProperty(
name="Type",
items=(('TOGGLE', "Toggle", ""),
('ENABLE', "Enable", ""),
('DISABLE', "Disable", ""),
),
)
def execute(self, context):
data_path_iter = self.data_path_iter
data_path_item = self.data_path_item
items = list(getattr(context, data_path_iter))
items_ok = []
is_set = False
for item in items:
try:
value_orig = eval("item." + data_path_item)
except:
continue
if value_orig is True:
is_set = True
elif value_orig is False:
pass
else:
self.report({'WARNING'}, "Non boolean value found: %s[ ].%s" %
(data_path_iter, data_path_item))
return {'CANCELLED'}
items_ok.append(item)
# avoid undo push when nothing to do
if not items_ok:
return {'CANCELLED'}
if self.type == 'ENABLE':
is_set = True
elif self.type == 'DISABLE':
is_set = False
else:
is_set = not is_set
exec_str = "item.%s = %s" % (data_path_item, is_set)
for item in items_ok:
exec(exec_str)
return operator_value_undo_return(item)
class WM_OT_context_modal_mouse(Operator):
"""Adjust arbitrary values with mouse input"""
bl_idname = "wm.context_modal_mouse"
bl_label = "Context Modal Mouse"
bl_options = {'GRAB_CURSOR', 'BLOCKING', 'UNDO', 'INTERNAL'}
data_path_iter = data_path_iter
data_path_item = data_path_item
header_text = StringProperty(
name="Header Text",
description="Text to display in header during scale",
)
input_scale = FloatProperty(
description="Scale the mouse movement by this value before applying the delta",
default=0.01,
)
invert = BoolProperty(
description="Invert the mouse input",
default=False,
)
initial_x = IntProperty(options={'HIDDEN'})
def _values_store(self, context):
data_path_iter = self.data_path_iter
data_path_item = self.data_path_item
self._values = values = {}
for item in getattr(context, data_path_iter):
try:
value_orig = eval("item." + data_path_item)
except:
continue
# check this can be set, maybe this is library data.
try:
exec("item.%s = %s" % (data_path_item, value_orig))
except:
continue
values[item] = value_orig
def _values_delta(self, delta):
delta *= self.input_scale
if self.invert:
delta = - delta
data_path_item = self.data_path_item
for item, value_orig in self._values.items():
if type(value_orig) == int:
exec("item.%s = int(%d)" % (data_path_item, round(value_orig + delta)))
else:
exec("item.%s = %f" % (data_path_item, value_orig + delta))
def _values_restore(self):
data_path_item = self.data_path_item
for item, value_orig in self._values.items():
exec("item.%s = %s" % (data_path_item, value_orig))
self._values.clear()
def _values_clear(self):
self._values.clear()
def modal(self, context, event):
event_type = event.type
if event_type == 'MOUSEMOVE':
delta = event.mouse_x - self.initial_x
self._values_delta(delta)
header_text = self.header_text
if header_text:
if len(self._values) == 1:
(item, ) = self._values.keys()
header_text = header_text % eval("item.%s" % self.data_path_item)
else:
header_text = (self.header_text % delta) + " (delta)"
context.area.header_text_set(header_text)
elif 'LEFTMOUSE' == event_type:
item = next(iter(self._values.keys()))
self._values_clear()
context.area.header_text_set()
return operator_value_undo_return(item)
elif event_type in {'RIGHTMOUSE', 'ESC'}:
self._values_restore()
context.area.header_text_set()
return {'CANCELLED'}
return {'RUNNING_MODAL'}
def invoke(self, context, event):
self._values_store(context)
if not self._values:
self.report({'WARNING'}, "Nothing to operate on: %s[ ].%s" %
(self.data_path_iter, self.data_path_item))
return {'CANCELLED'}
else:
self.initial_x = event.mouse_x
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
class WM_OT_url_open(Operator):
"""Open a website in the web-browser"""
bl_idname = "wm.url_open"
bl_label = ""
bl_options = {'INTERNAL'}
url = StringProperty(
name="URL",
description="URL to open",
)
def execute(self, context):
import webbrowser
webbrowser.open(self.url)
return {'FINISHED'}
class WM_OT_path_open(Operator):
"""Open a path in a file browser"""
bl_idname = "wm.path_open"
bl_label = ""
bl_options = {'INTERNAL'}
filepath = StringProperty(
subtype='FILE_PATH',
options={'SKIP_SAVE'},
)
def execute(self, context):
import sys
import os
import subprocess
filepath = self.filepath
if not filepath:
self.report({'ERROR'}, "File path was not set")
return {'CANCELLED'}
filepath = bpy.path.abspath(filepath)
filepath = os.path.normpath(filepath)
if not os.path.exists(filepath):
self.report({'ERROR'}, "File '%s' not found" % filepath)
return {'CANCELLED'}
if sys.platform[:3] == "win":
os.startfile(filepath)
elif sys.platform == "darwin":
subprocess.check_call(["open", filepath])
else:
try:
subprocess.check_call(["xdg-open", filepath])
except:
# xdg-open *should* be supported by recent Gnome, KDE, Xfce
import traceback
traceback.print_exc()
return {'FINISHED'}
def _wm_doc_get_id(doc_id, do_url=True, url_prefix=""):
id_split = doc_id.split(".")
url = rna = None
if len(id_split) == 1: # rna, class
if do_url:
url = "%s/bpy.types.%s.html" % (url_prefix, id_split[0])
else:
rna = "bpy.types.%s" % id_split[0]
elif len(id_split) == 2: # rna, class.prop
class_name, class_prop = id_split
# an operator (common case - just button referencing an op)
if hasattr(bpy.types, class_name.upper() + "_OT_" + class_prop):
if do_url:
url = (
"%s/bpy.ops.%s.html#bpy.ops.%s.%s" %
(url_prefix, class_name, class_name, class_prop)
)
else:
rna = "bpy.ops.%s.%s" % (class_name, class_prop)
else:
rna_class = getattr(bpy.types, class_name)
# an operator setting (selected from a running operator), rare case
# note: Py defined operators are subclass of Operator,
# C defined operators are subclass of OperatorProperties.
# we may need to check on this at some point.
if issubclass(rna_class, (bpy.types.Operator, bpy.types.OperatorProperties)):
# note: ignore the prop name since we don't have a way to link into it
class_name, class_prop = class_name.split("_OT_", 1)
class_name = class_name.lower()
if do_url:
url = (
"%s/bpy.ops.%s.html#bpy.ops.%s.%s" %
(url_prefix, class_name, class_name, class_prop)
)
else:
rna = "bpy.ops.%s.%s" % (class_name, class_prop)
else:
# an RNA setting, common case
                # detect if this is an inherited member and use that name instead
rna_parent = rna_class.bl_rna
rna_prop = rna_parent.properties.get(class_prop)
if rna_prop:
rna_parent = rna_parent.base
while rna_parent and rna_prop == rna_parent.properties.get(class_prop):
class_name = rna_parent.identifier
rna_parent = rna_parent.base
if do_url:
url = (
"%s/bpy.types.%s.html#bpy.types.%s.%s" %
(url_prefix, class_name, class_name, class_prop)
)
else:
rna = "bpy.types.%s.%s" % (class_name, class_prop)
else:
                    # We assume this is a custom property, only try to generate a generic url/rna_id...
if do_url:
url = ("%s/bpy.types.bpy_struct.html#bpy.types.bpy_struct.items" % (url_prefix,))
else:
rna = "bpy.types.bpy_struct"
return url if do_url else rna
class WM_OT_doc_view_manual(Operator):
"""Load online manual"""
bl_idname = "wm.doc_view_manual"
bl_label = "View Manual"
doc_id = doc_id
@staticmethod
def _find_reference(rna_id, url_mapping, verbose=True):
if verbose:
print("online manual check for: '%s'... " % rna_id)
from fnmatch import fnmatchcase
# XXX, for some reason all RNA ID's are stored lowercase
# Adding case into all ID's isn't worth the hassle so force lowercase.
rna_id = rna_id.lower()
for pattern, url_suffix in url_mapping:
if fnmatchcase(rna_id, pattern):
if verbose:
print(" match found: '%s' --> '%s'" % (pattern, url_suffix))
return url_suffix
if verbose:
print("match not found")
return None
@staticmethod
def _lookup_rna_url(rna_id, verbose=True):
for prefix, url_manual_mapping in bpy.utils.manual_map():
rna_ref = WM_OT_doc_view_manual._find_reference(rna_id, url_manual_mapping, verbose=verbose)
if rna_ref is not None:
url = prefix + rna_ref
return url
def execute(self, context):
rna_id = _wm_doc_get_id(self.doc_id, do_url=False)
if rna_id is None:
return {'PASS_THROUGH'}
url = self._lookup_rna_url(rna_id)
if url is None:
self.report(
{'WARNING'},
"No reference available %r, "
"Update info in 'rna_manual_reference.py' "
"or callback to bpy.utils.manual_map()" %
self.doc_id)
return {'CANCELLED'}
else:
import webbrowser
webbrowser.open(url)
return {'FINISHED'}
class WM_OT_doc_view(Operator):
"""Load online reference docs"""
bl_idname = "wm.doc_view"
bl_label = "View Documentation"
doc_id = doc_id
if bpy.app.version_cycle == "release":
_prefix = ("https://docs.blender.org/api/blender_python_api_current")
else:
_prefix = ("https://docs.blender.org/api/blender_python_api_master")
def execute(self, context):
url = _wm_doc_get_id(self.doc_id, do_url=True, url_prefix=self._prefix)
if url is None:
return {'PASS_THROUGH'}
import webbrowser
webbrowser.open(url)
return {'FINISHED'}
rna_path = StringProperty(
name="Property Edit",
description="Property data_path edit",
maxlen=1024,
options={'HIDDEN'},
)
rna_value = StringProperty(
name="Property Value",
description="Property value edit",
maxlen=1024,
)
rna_property = StringProperty(
name="Property Name",
description="Property name edit",
maxlen=1024,
)
rna_min = FloatProperty(
name="Min",
default=-10000.0,
precision=3,
)
rna_max = FloatProperty(
name="Max",
default=10000.0,
precision=3,
)
rna_use_soft_limits = BoolProperty(
name="Use Soft Limits",
)
class WM_OT_properties_edit(Operator):
bl_idname = "wm.properties_edit"
bl_label = "Edit Property"
# register only because invoke_props_popup requires.
bl_options = {'REGISTER', 'INTERNAL'}
data_path = rna_path
property = rna_property
value = rna_value
min = rna_min
max = rna_max
use_soft_limits = rna_use_soft_limits
soft_min = rna_min
soft_max = rna_max
description = StringProperty(
name="Tooltip",
)
def _cmp_props_get(self):
# Changing these properties will refresh the UI
return {
"use_soft_limits": self.use_soft_limits,
"soft_range": (self.soft_min, self.soft_max),
"hard_range": (self.min, self.max),
}
def execute(self, context):
from rna_prop_ui import (
rna_idprop_ui_prop_get,
rna_idprop_ui_prop_clear,
rna_idprop_ui_prop_update,
)
data_path = self.data_path
value = self.value
prop = self.property
prop_old = getattr(self, "_last_prop", [None])[0]
if prop_old is None:
self.report({'ERROR'}, "Direct execution not supported")
return {'CANCELLED'}
try:
value_eval = eval(value)
# assert else None -> None, not "None", see [#33431]
assert(type(value_eval) in {str, float, int, bool, tuple, list})
except:
value_eval = value
# First remove
item = eval("context.%s" % data_path)
prop_type_old = type(item[prop_old])
rna_idprop_ui_prop_clear(item, prop_old)
exec_str = "del item[%r]" % prop_old
# print(exec_str)
exec(exec_str)
# Reassign
exec_str = "item[%r] = %s" % (prop, repr(value_eval))
# print(exec_str)
exec(exec_str)
rna_idprop_ui_prop_update(item, prop)
self._last_prop[:] = [prop]
prop_type = type(item[prop])
prop_ui = rna_idprop_ui_prop_get(item, prop)
if prop_type in {float, int}:
prop_ui["min"] = prop_type(self.min)
prop_ui["max"] = prop_type(self.max)
if self.use_soft_limits:
prop_ui["soft_min"] = prop_type(self.soft_min)
prop_ui["soft_max"] = prop_type(self.soft_max)
else:
prop_ui["soft_min"] = prop_type(self.min)
prop_ui["soft_max"] = prop_type(self.max)
prop_ui["description"] = self.description
# If we have changed the type of the property, update its potential anim curves!
if prop_type_old != prop_type:
data_path = '["%s"]' % bpy.utils.escape_identifier(prop)
done = set()
def _update(fcurves):
for fcu in fcurves:
if fcu not in done and fcu.data_path == data_path:
fcu.update_autoflags(item)
done.add(fcu)
def _update_strips(strips):
for st in strips:
if st.type == 'CLIP' and st.action:
_update(st.action.fcurves)
elif st.type == 'META':
_update_strips(st.strips)
adt = getattr(item, "animation_data", None)
if adt is not None:
if adt.action:
_update(adt.action.fcurves)
if adt.drivers:
_update(adt.drivers)
if adt.nla_tracks:
for nt in adt.nla_tracks:
_update_strips(nt.strips)
# otherwise existing buttons which reference freed
# memory may crash blender [#26510]
# context.area.tag_redraw()
for win in context.window_manager.windows:
for area in win.screen.areas:
area.tag_redraw()
return {'FINISHED'}
def invoke(self, context, event):
from rna_prop_ui import rna_idprop_ui_prop_get
data_path = self.data_path
if not data_path:
self.report({'ERROR'}, "Data path not set")
return {'CANCELLED'}
self._last_prop = [self.property]
item = eval("context.%s" % data_path)
# setup defaults
prop_ui = rna_idprop_ui_prop_get(item, self.property, False) # don't create
if prop_ui:
self.min = prop_ui.get("min", -1000000000)
self.max = prop_ui.get("max", 1000000000)
self.description = prop_ui.get("description", "")
self.soft_min = prop_ui.get("soft_min", self.min)
self.soft_max = prop_ui.get("soft_max", self.max)
self.use_soft_limits = (
self.min != self.soft_min or
self.max != self.soft_max)
# store for comparison
self._cmp_props = self._cmp_props_get()
wm = context.window_manager
return wm.invoke_props_dialog(self)
def check(self, context):
cmp_props = self._cmp_props_get()
changed = False
if self._cmp_props != cmp_props:
if cmp_props["use_soft_limits"]:
if cmp_props["soft_range"] != self._cmp_props["soft_range"]:
self.min = min(self.min, self.soft_min)
self.max = max(self.max, self.soft_max)
changed = True
if cmp_props["hard_range"] != self._cmp_props["hard_range"]:
self.soft_min = max(self.min, self.soft_min)
self.soft_max = min(self.max, self.soft_max)
changed = True
else:
if cmp_props["soft_range"] != cmp_props["hard_range"]:
self.soft_min = self.min
self.soft_max = self.max
changed = True
changed |= (cmp_props["use_soft_limits"] != self._cmp_props["use_soft_limits"])
if changed:
cmp_props = self._cmp_props_get()
self._cmp_props = cmp_props
return changed
def draw(self, context):
layout = self.layout
layout.prop(self, "property")
layout.prop(self, "value")
row = layout.row(align=True)
row.prop(self, "min")
row.prop(self, "max")
layout.prop(self, "use_soft_limits")
row = layout.row(align=True)
row.enabled = self.use_soft_limits
row.prop(self, "soft_min", text="Soft Min")
row.prop(self, "soft_max", text="Soft Max")
layout.prop(self, "description")
class WM_OT_properties_add(Operator):
bl_idname = "wm.properties_add"
bl_label = "Add Property"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path
def execute(self, context):
from rna_prop_ui import (
rna_idprop_ui_prop_get,
rna_idprop_ui_prop_update,
)
data_path = self.data_path
item = eval("context.%s" % data_path)
def unique_name(names):
prop = "prop"
prop_new = prop
i = 1
while prop_new in names:
prop_new = prop + str(i)
i += 1
return prop_new
prop = unique_name({
*item.keys(),
*type(item).bl_rna.properties.keys(),
})
item[prop] = 1.0
rna_idprop_ui_prop_update(item, prop)
# not essential, but without this we get [#31661]
prop_ui = rna_idprop_ui_prop_get(item, prop)
prop_ui["soft_min"] = prop_ui["min"] = 0.0
prop_ui["soft_max"] = prop_ui["max"] = 1.0
return {'FINISHED'}
class WM_OT_properties_context_change(Operator):
"""Jump to a different tab inside the properties editor"""
bl_idname = "wm.properties_context_change"
bl_label = ""
bl_options = {'INTERNAL'}
context = StringProperty(
name="Context",
maxlen=64,
)
def execute(self, context):
context.space_data.context = self.context
return {'FINISHED'}
class WM_OT_properties_remove(Operator):
"""Internal use (edit a property data_path)"""
bl_idname = "wm.properties_remove"
bl_label = "Remove Property"
bl_options = {'UNDO', 'INTERNAL'}
data_path = rna_path
property = rna_property
def execute(self, context):
from rna_prop_ui import (
rna_idprop_ui_prop_clear,
rna_idprop_ui_prop_update,
)
data_path = self.data_path
item = eval("context.%s" % data_path)
prop = self.property
rna_idprop_ui_prop_update(item, prop)
del item[prop]
rna_idprop_ui_prop_clear(item, prop)
return {'FINISHED'}
class WM_OT_keyconfig_activate(Operator):
bl_idname = "wm.keyconfig_activate"
bl_label = "Activate Keyconfig"
filepath = StringProperty(
subtype='FILE_PATH',
)
def execute(self, context):
if bpy.utils.keyconfig_set(self.filepath, report=self.report):
return {'FINISHED'}
else:
return {'CANCELLED'}
class WM_OT_appconfig_default(Operator):
bl_idname = "wm.appconfig_default"
bl_label = "Default Application Configuration"
def execute(self, context):
import os
context.window_manager.keyconfigs.active = context.window_manager.keyconfigs.default
filepath = os.path.join(bpy.utils.preset_paths("interaction")[0], "blender.py")
if os.path.exists(filepath):
bpy.ops.script.execute_preset(
filepath=filepath,
menu_idname="USERPREF_MT_interaction_presets",
)
return {'FINISHED'}
class WM_OT_appconfig_activate(Operator):
bl_idname = "wm.appconfig_activate"
bl_label = "Activate Application Configuration"
filepath = StringProperty(
subtype='FILE_PATH',
)
def execute(self, context):
import os
bpy.utils.keyconfig_set(self.filepath)
filepath = self.filepath.replace("keyconfig", "interaction")
if os.path.exists(filepath):
bpy.ops.script.execute_preset(
filepath=filepath,
menu_idname="USERPREF_MT_interaction_presets",
)
return {'FINISHED'}
class WM_OT_sysinfo(Operator):
"""Generate system information, saved into a text file"""
bl_idname = "wm.sysinfo"
bl_label = "Save System Info"
filepath = StringProperty(
subtype='FILE_PATH',
options={'SKIP_SAVE'},
)
def execute(self, context):
import sys_info
sys_info.write_sysinfo(self.filepath)
return {'FINISHED'}
def invoke(self, context, event):
import os
if not self.filepath:
self.filepath = os.path.join(
os.path.expanduser("~"), "system-info.txt")
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
class WM_OT_copy_prev_settings(Operator):
"""Copy settings from previous version"""
bl_idname = "wm.copy_prev_settings"
bl_label = "Copy Previous Settings"
def execute(self, context):
import os
import shutil
ver = bpy.app.version
ver_old = ((ver[0] * 100) + ver[1]) - 1
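        # e.g. for Blender 2.79 this gives 278, so settings are copied from 2.78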
path_src = bpy.utils.resource_path('USER', ver_old // 100, ver_old % 100)
path_dst = bpy.utils.resource_path('USER')
if os.path.isdir(path_dst):
self.report({'ERROR'}, "Target path %r exists" % path_dst)
elif not os.path.isdir(path_src):
self.report({'ERROR'}, "Source path %r does not exist" % path_src)
else:
shutil.copytree(path_src, path_dst, symlinks=True)
# reload recent-files.txt
bpy.ops.wm.read_history()
            # don't lose the user's work if they open the splash later.
if bpy.data.is_saved is bpy.data.is_dirty is False:
bpy.ops.wm.read_homefile()
else:
self.report({'INFO'}, "Reload Start-Up file to restore settings")
return {'FINISHED'}
return {'CANCELLED'}
class WM_OT_blenderplayer_start(Operator):
"""Launch the blender-player with the current blend-file"""
bl_idname = "wm.blenderplayer_start"
bl_label = "Start Game In Player"
def execute(self, context):
import os
import sys
import subprocess
gs = context.scene.game_settings
# these remain the same every execution
blender_bin_path = bpy.app.binary_path
blender_bin_dir = os.path.dirname(blender_bin_path)
ext = os.path.splitext(blender_bin_path)[-1]
player_path = os.path.join(blender_bin_dir, "blenderplayer" + ext)
# done static vars
if sys.platform == "darwin":
player_path = os.path.join(blender_bin_dir, "../../../blenderplayer.app/Contents/MacOS/blenderplayer")
if not os.path.exists(player_path):
self.report({'ERROR'}, "Player path: %r not found" % player_path)
return {'CANCELLED'}
filepath = bpy.data.filepath + '~' if bpy.data.is_saved else os.path.join(bpy.app.tempdir, "game.blend")
bpy.ops.wm.save_as_mainfile('EXEC_DEFAULT', filepath=filepath, copy=True)
# start the command line call with the player path
args = [player_path]
# handle some UI options as command line arguments
args.extend([
"-g", "show_framerate", "=", "%d" % gs.show_framerate_profile,
"-g", "show_profile", "=", "%d" % gs.show_framerate_profile,
"-g", "show_properties", "=", "%d" % gs.show_debug_properties,
"-g", "ignore_deprecation_warnings", "=", "%d" % (not gs.use_deprecation_warnings),
])
# finish the call with the path to the blend file
args.append(filepath)
subprocess.call(args)
os.remove(filepath)
return {'FINISHED'}
class WM_OT_keyconfig_test(Operator):
"""Test key-config for conflicts"""
bl_idname = "wm.keyconfig_test"
bl_label = "Test Key Configuration for Conflicts"
def execute(self, context):
from bpy_extras import keyconfig_utils
wm = context.window_manager
kc = wm.keyconfigs.default
if keyconfig_utils.keyconfig_test(kc):
print("CONFLICT")
return {'FINISHED'}
class WM_OT_keyconfig_import(Operator):
"""Import key configuration from a python script"""
bl_idname = "wm.keyconfig_import"
bl_label = "Import Key Configuration..."
filepath = StringProperty(
subtype='FILE_PATH',
default="keymap.py",
)
filter_folder = BoolProperty(
name="Filter folders",
default=True,
options={'HIDDEN'},
)
filter_text = BoolProperty(
name="Filter text",
default=True,
options={'HIDDEN'},
)
filter_python = BoolProperty(
name="Filter python",
default=True,
options={'HIDDEN'},
)
keep_original = BoolProperty(
name="Keep original",
description="Keep original file after copying to configuration folder",
default=True,
)
def execute(self, context):
import os
from os.path import basename
import shutil
if not self.filepath:
self.report({'ERROR'}, "Filepath not set")
return {'CANCELLED'}
config_name = basename(self.filepath)
path = bpy.utils.user_resource('SCRIPTS', os.path.join("presets", "keyconfig"), create=True)
path = os.path.join(path, config_name)
try:
if self.keep_original:
shutil.copy(self.filepath, path)
else:
shutil.move(self.filepath, path)
except Exception as e:
self.report({'ERROR'}, "Installing keymap failed: %s" % e)
return {'CANCELLED'}
# sneaky way to check we're actually running the code.
if bpy.utils.keyconfig_set(path, report=self.report):
return {'FINISHED'}
else:
return {'CANCELLED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
# This operator is also used by interaction presets saving - AddPresetBase
class WM_OT_keyconfig_export(Operator):
"""Export key configuration to a python script"""
bl_idname = "wm.keyconfig_export"
bl_label = "Export Key Configuration..."
filepath = StringProperty(
subtype='FILE_PATH',
default="keymap.py",
)
filter_folder = BoolProperty(
name="Filter folders",
default=True,
options={'HIDDEN'},
)
filter_text = BoolProperty(
name="Filter text",
default=True,
options={'HIDDEN'},
)
filter_python = BoolProperty(
name="Filter python",
default=True,
options={'HIDDEN'},
)
def execute(self, context):
from bpy_extras import keyconfig_utils
if not self.filepath:
raise Exception("Filepath not set")
if not self.filepath.endswith(".py"):
self.filepath += ".py"
wm = context.window_manager
keyconfig_utils.keyconfig_export(
wm,
wm.keyconfigs.active,
self.filepath,
)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
class WM_OT_keymap_restore(Operator):
"""Restore key map(s)"""
bl_idname = "wm.keymap_restore"
bl_label = "Restore Key Map(s)"
all = BoolProperty(
name="All Keymaps",
description="Restore all keymaps to default",
)
def execute(self, context):
wm = context.window_manager
if self.all:
for km in wm.keyconfigs.user.keymaps:
km.restore_to_default()
else:
km = context.keymap
km.restore_to_default()
return {'FINISHED'}
class WM_OT_keyitem_restore(Operator):
"""Restore key map item"""
bl_idname = "wm.keyitem_restore"
bl_label = "Restore Key Map Item"
item_id = IntProperty(
name="Item Identifier",
description="Identifier of the item to remove",
)
@classmethod
def poll(cls, context):
keymap = getattr(context, "keymap", None)
return keymap
def execute(self, context):
km = context.keymap
kmi = km.keymap_items.from_id(self.item_id)
if (not kmi.is_user_defined) and kmi.is_user_modified:
km.restore_item_to_default(kmi)
return {'FINISHED'}
class WM_OT_keyitem_add(Operator):
"""Add key map item"""
bl_idname = "wm.keyitem_add"
bl_label = "Add Key Map Item"
def execute(self, context):
km = context.keymap
if km.is_modal:
km.keymap_items.new_modal("", 'A', 'PRESS')
else:
km.keymap_items.new("none", 'A', 'PRESS')
# clear filter and expand keymap so we can see the newly added item
if context.space_data.filter_text != "":
context.space_data.filter_text = ""
km.show_expanded_items = True
km.show_expanded_children = True
return {'FINISHED'}
class WM_OT_keyitem_remove(Operator):
"""Remove key map item"""
bl_idname = "wm.keyitem_remove"
bl_label = "Remove Key Map Item"
item_id = IntProperty(
name="Item Identifier",
description="Identifier of the item to remove",
)
@classmethod
def poll(cls, context):
return hasattr(context, "keymap")
def execute(self, context):
km = context.keymap
kmi = km.keymap_items.from_id(self.item_id)
km.keymap_items.remove(kmi)
return {'FINISHED'}
class WM_OT_keyconfig_remove(Operator):
"""Remove key config"""
bl_idname = "wm.keyconfig_remove"
bl_label = "Remove Key Config"
@classmethod
def poll(cls, context):
wm = context.window_manager
keyconf = wm.keyconfigs.active
return keyconf and keyconf.is_user_defined
def execute(self, context):
wm = context.window_manager
keyconfig = wm.keyconfigs.active
wm.keyconfigs.remove(keyconfig)
return {'FINISHED'}
class WM_OT_operator_cheat_sheet(Operator):
"""List all the Operators in a text-block, useful for scripting"""
bl_idname = "wm.operator_cheat_sheet"
bl_label = "Operator Cheat Sheet"
def execute(self, context):
op_strings = []
tot = 0
for op_module_name in dir(bpy.ops):
op_module = getattr(bpy.ops, op_module_name)
for op_submodule_name in dir(op_module):
op = getattr(op_module, op_submodule_name)
text = repr(op)
if text.split("\n")[-1].startswith("bpy.ops."):
op_strings.append(text)
tot += 1
op_strings.append('')
textblock = bpy.data.texts.new("OperatorList.txt")
textblock.write('# %d Operators\n\n' % tot)
textblock.write('\n'.join(op_strings))
self.report({'INFO'}, "See OperatorList.txt textblock")
return {'FINISHED'}
# -----------------------------------------------------------------------------
# Add-on Operators
class WM_OT_addon_enable(Operator):
"""Enable an add-on"""
bl_idname = "wm.addon_enable"
bl_label = "Enable Add-on"
module = StringProperty(
name="Module",
description="Module name of the add-on to enable",
)
def execute(self, context):
import addon_utils
err_str = ""
def err_cb(ex):
import traceback
nonlocal err_str
err_str = traceback.format_exc()
print(err_str)
mod = addon_utils.enable(self.module, default_set=True, handle_error=err_cb)
if mod:
info = addon_utils.module_bl_info(mod)
info_ver = info.get("blender", (0, 0, 0))
if info_ver > bpy.app.version:
self.report({'WARNING'},
("This script was written Blender "
"version %d.%d.%d and might not "
"function (correctly), "
"though it is enabled" %
info_ver))
return {'FINISHED'}
else:
if err_str:
self.report({'ERROR'}, err_str)
return {'CANCELLED'}
class WM_OT_addon_disable(Operator):
"""Disable an add-on"""
bl_idname = "wm.addon_disable"
bl_label = "Disable Add-on"
module = StringProperty(
name="Module",
description="Module name of the add-on to disable",
)
def execute(self, context):
import addon_utils
err_str = ""
def err_cb(ex):
import traceback
nonlocal err_str
err_str = traceback.format_exc()
print(err_str)
addon_utils.disable(self.module, default_set=True, handle_error=err_cb)
if err_str:
self.report({'ERROR'}, err_str)
return {'FINISHED'}
class WM_OT_theme_install(Operator):
"""Load and apply a Blender XML theme file"""
bl_idname = "wm.theme_install"
bl_label = "Install Theme..."
overwrite = BoolProperty(
name="Overwrite",
description="Remove existing theme file if exists",
default=True,
)
filepath = StringProperty(
subtype='FILE_PATH',
)
filter_folder = BoolProperty(
name="Filter folders",
default=True,
options={'HIDDEN'},
)
filter_glob = StringProperty(
default="*.xml",
options={'HIDDEN'},
)
def execute(self, context):
import os
import shutil
import traceback
xmlfile = self.filepath
path_themes = bpy.utils.user_resource('SCRIPTS', "presets/interface_theme", create=True)
if not path_themes:
self.report({'ERROR'}, "Failed to get themes path")
return {'CANCELLED'}
path_dest = os.path.join(path_themes, os.path.basename(xmlfile))
if not self.overwrite:
if os.path.exists(path_dest):
self.report({'WARNING'}, "File already installed to %r\n" % path_dest)
return {'CANCELLED'}
try:
shutil.copyfile(xmlfile, path_dest)
bpy.ops.script.execute_preset(
filepath=path_dest,
menu_idname="USERPREF_MT_interface_theme_presets",
)
except:
traceback.print_exc()
return {'CANCELLED'}
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
class WM_OT_addon_refresh(Operator):
"""Scan add-on directories for new modules"""
bl_idname = "wm.addon_refresh"
bl_label = "Refresh"
def execute(self, context):
import addon_utils
addon_utils.modules_refresh()
return {'FINISHED'}
# Note: shares some logic with WM_OT_app_template_install
# but not enough to de-duplicate. Fixes here may apply to both.
class WM_OT_addon_install(Operator):
"""Install an add-on"""
bl_idname = "wm.addon_install"
bl_label = "Install Add-on from File..."
overwrite = BoolProperty(
name="Overwrite",
description="Remove existing add-ons with the same ID",
default=True,
)
target = EnumProperty(
name="Target Path",
items=(('DEFAULT', "Default", ""),
('PREFS', "User Prefs", "")),
)
filepath = StringProperty(
subtype='FILE_PATH',
)
filter_folder = BoolProperty(
name="Filter folders",
default=True,
options={'HIDDEN'},
)
filter_python = BoolProperty(
name="Filter python",
default=True,
options={'HIDDEN'},
)
filter_glob = StringProperty(
default="*.py;*.zip",
options={'HIDDEN'},
)
def execute(self, context):
import addon_utils
import traceback
import zipfile
import shutil
import os
pyfile = self.filepath
if self.target == 'DEFAULT':
# don't use bpy.utils.script_paths("addons") because we may not be able to write to it.
path_addons = bpy.utils.user_resource('SCRIPTS', "addons", create=True)
else:
path_addons = context.user_preferences.filepaths.script_directory
if path_addons:
path_addons = os.path.join(path_addons, "addons")
if not path_addons:
self.report({'ERROR'}, "Failed to get add-ons path")
return {'CANCELLED'}
if not os.path.isdir(path_addons):
try:
os.makedirs(path_addons, exist_ok=True)
except:
traceback.print_exc()
        # Check if we are installing from a target path;
        # doing so with 2+ add-ons of the same name, or when the same from/to
        # location is used, would remove the source file!
addon_path = ""
pyfile_dir = os.path.dirname(pyfile)
for addon_path in addon_utils.paths():
if os.path.samefile(pyfile_dir, addon_path):
self.report({'ERROR'}, "Source file is in the add-on search path: %r" % addon_path)
return {'CANCELLED'}
del addon_path
del pyfile_dir
# done checking for exceptional case
addons_old = {mod.__name__ for mod in addon_utils.modules()}
# check to see if the file is in compressed format (.zip)
if zipfile.is_zipfile(pyfile):
try:
file_to_extract = zipfile.ZipFile(pyfile, 'r')
except:
traceback.print_exc()
return {'CANCELLED'}
if self.overwrite:
for f in file_to_extract.namelist():
module_filesystem_remove(path_addons, f)
else:
for f in file_to_extract.namelist():
path_dest = os.path.join(path_addons, os.path.basename(f))
if os.path.exists(path_dest):
self.report({'WARNING'}, "File already installed to %r\n" % path_dest)
return {'CANCELLED'}
try: # extract the file to "addons"
file_to_extract.extractall(path_addons)
except:
traceback.print_exc()
return {'CANCELLED'}
else:
path_dest = os.path.join(path_addons, os.path.basename(pyfile))
if self.overwrite:
module_filesystem_remove(path_addons, os.path.basename(pyfile))
elif os.path.exists(path_dest):
self.report({'WARNING'}, "File already installed to %r\n" % path_dest)
return {'CANCELLED'}
# if not compressed file just copy into the addon path
try:
shutil.copyfile(pyfile, path_dest)
except:
traceback.print_exc()
return {'CANCELLED'}
addons_new = {mod.__name__ for mod in addon_utils.modules()} - addons_old
addons_new.discard("modules")
# disable any addons we may have enabled previously and removed.
# this is unlikely but do just in case. bug [#23978]
for new_addon in addons_new:
addon_utils.disable(new_addon, default_set=True)
# possible the zip contains multiple addons, we could disallow this
# but for now just use the first
for mod in addon_utils.modules(refresh=False):
if mod.__name__ in addons_new:
info = addon_utils.module_bl_info(mod)
# show the newly installed addon.
context.window_manager.addon_filter = 'All'
context.window_manager.addon_search = info["name"]
break
# in case a new module path was created to install this addon.
bpy.utils.refresh_script_paths()
# print message
msg = (
tip_("Modules Installed (%s) from %r into %r") %
(", ".join(sorted(addons_new)), pyfile, path_addons)
)
print(msg)
self.report({'INFO'}, msg)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
class WM_OT_addon_remove(Operator):
"""Delete the add-on from the file system"""
bl_idname = "wm.addon_remove"
bl_label = "Remove Add-on"
module = StringProperty(
name="Module",
description="Module name of the add-on to remove",
)
@staticmethod
def path_from_addon(module):
import os
import addon_utils
for mod in addon_utils.modules():
if mod.__name__ == module:
filepath = mod.__file__
if os.path.exists(filepath):
if os.path.splitext(os.path.basename(filepath))[0] == "__init__":
return os.path.dirname(filepath), True
else:
return filepath, False
return None, False
def execute(self, context):
import addon_utils
import os
path, isdir = WM_OT_addon_remove.path_from_addon(self.module)
if path is None:
self.report({'WARNING'}, "Add-on path %r could not be found" % path)
return {'CANCELLED'}
# in case its enabled
addon_utils.disable(self.module, default_set=True)
import shutil
if isdir:
shutil.rmtree(path)
else:
os.remove(path)
addon_utils.modules_refresh()
context.area.tag_redraw()
return {'FINISHED'}
# lame confirmation check
def draw(self, context):
self.layout.label(text="Remove Add-on: %r?" % self.module)
path, isdir = WM_OT_addon_remove.path_from_addon(self.module)
self.layout.label(text="Path: %r" % path)
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self, width=600)
class WM_OT_addon_expand(Operator):
"""Display information and preferences for this add-on"""
bl_idname = "wm.addon_expand"
bl_label = ""
bl_options = {'INTERNAL'}
module = StringProperty(
name="Module",
description="Module name of the add-on to expand",
)
def execute(self, context):
import addon_utils
module_name = self.module
mod = addon_utils.addons_fake_modules.get(module_name)
if mod is not None:
info = addon_utils.module_bl_info(mod)
info["show_expanded"] = not info["show_expanded"]
return {'FINISHED'}
class WM_OT_addon_userpref_show(Operator):
"""Show add-on user preferences"""
bl_idname = "wm.addon_userpref_show"
bl_label = ""
bl_options = {'INTERNAL'}
module = StringProperty(
name="Module",
description="Module name of the add-on to expand",
)
def execute(self, context):
import addon_utils
module_name = self.module
modules = addon_utils.modules(refresh=False)
mod = addon_utils.addons_fake_modules.get(module_name)
if mod is not None:
info = addon_utils.module_bl_info(mod)
info["show_expanded"] = True
context.user_preferences.active_section = 'ADDONS'
context.window_manager.addon_filter = 'All'
context.window_manager.addon_search = info["name"]
bpy.ops.screen.userpref_show('INVOKE_DEFAULT')
return {'FINISHED'}
# Note: shares some logic with WM_OT_addon_install
# but not enough to de-duplicate. Fixes here may apply to both.
class WM_OT_app_template_install(Operator):
"""Install an application-template"""
bl_idname = "wm.app_template_install"
bl_label = "Install Template from File..."
overwrite = BoolProperty(
name="Overwrite",
description="Remove existing template with the same ID",
default=True,
)
filepath = StringProperty(
subtype='FILE_PATH',
)
filter_folder = BoolProperty(
name="Filter folders",
default=True,
options={'HIDDEN'},
)
filter_glob = StringProperty(
default="*.zip",
options={'HIDDEN'},
)
def execute(self, context):
import traceback
import zipfile
import shutil
import os
filepath = self.filepath
path_app_templates = bpy.utils.user_resource(
'SCRIPTS', os.path.join("startup", "bl_app_templates_user"),
create=True,
)
if not path_app_templates:
self.report({'ERROR'}, "Failed to get add-ons path")
return {'CANCELLED'}
if not os.path.isdir(path_app_templates):
try:
os.makedirs(path_app_templates, exist_ok=True)
except:
traceback.print_exc()
app_templates_old = set(os.listdir(path_app_templates))
# check to see if the file is in compressed format (.zip)
if zipfile.is_zipfile(filepath):
try:
file_to_extract = zipfile.ZipFile(filepath, 'r')
except:
traceback.print_exc()
return {'CANCELLED'}
if self.overwrite:
for f in file_to_extract.namelist():
module_filesystem_remove(path_app_templates, f)
else:
for f in file_to_extract.namelist():
path_dest = os.path.join(path_app_templates, os.path.basename(f))
if os.path.exists(path_dest):
self.report({'WARNING'}, "File already installed to %r\n" % path_dest)
return {'CANCELLED'}
try: # extract the file to "bl_app_templates_user"
file_to_extract.extractall(path_app_templates)
except:
traceback.print_exc()
return {'CANCELLED'}
else:
# Only support installing zipfiles
self.report({'WARNING'}, "Expected a zip-file %r\n" % filepath)
return {'CANCELLED'}
app_templates_new = set(os.listdir(path_app_templates)) - app_templates_old
# in case a new module path was created to install this addon.
bpy.utils.refresh_script_paths()
# print message
msg = (
tip_("Template Installed (%s) from %r into %r") %
(", ".join(sorted(app_templates_new)), filepath, path_app_templates)
)
print(msg)
self.report({'INFO'}, msg)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
classes = (
BRUSH_OT_active_index_set,
WM_OT_addon_disable,
WM_OT_addon_enable,
WM_OT_addon_expand,
WM_OT_addon_install,
WM_OT_addon_refresh,
WM_OT_addon_remove,
WM_OT_addon_userpref_show,
WM_OT_app_template_install,
WM_OT_appconfig_activate,
WM_OT_appconfig_default,
WM_OT_blenderplayer_start,
WM_OT_context_collection_boolean_set,
WM_OT_context_cycle_array,
WM_OT_context_cycle_enum,
WM_OT_context_cycle_int,
WM_OT_context_menu_enum,
WM_OT_context_modal_mouse,
WM_OT_context_pie_enum,
WM_OT_context_scale_float,
WM_OT_context_scale_int,
WM_OT_context_set_boolean,
WM_OT_context_set_enum,
WM_OT_context_set_float,
WM_OT_context_set_id,
WM_OT_context_set_int,
WM_OT_context_set_string,
WM_OT_context_set_value,
WM_OT_context_toggle,
WM_OT_context_toggle_enum,
WM_OT_copy_prev_settings,
WM_OT_doc_view,
WM_OT_doc_view_manual,
WM_OT_keyconfig_activate,
WM_OT_keyconfig_export,
WM_OT_keyconfig_import,
WM_OT_keyconfig_remove,
WM_OT_keyconfig_test,
WM_OT_keyitem_add,
WM_OT_keyitem_remove,
WM_OT_keyitem_restore,
WM_OT_keymap_restore,
WM_OT_operator_cheat_sheet,
WM_OT_operator_pie_enum,
WM_OT_path_open,
WM_OT_properties_add,
WM_OT_properties_context_change,
WM_OT_properties_edit,
WM_OT_properties_remove,
WM_OT_sysinfo,
WM_OT_theme_install,
WM_OT_url_open,
)
```
#### File: tools/check_blender_release/check_release.py
```python
import os
import sys
import unittest
import check_module_enabled
import check_module_numpy
import check_module_requests
import check_static_binaries
from check_utils import sliceCommandLineArguments
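# unittest's load_tests protocol: unittest.main() below discovers this function
# and aggregates the test cases from the individual check modules.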
def load_tests(loader, standard_tests, pattern):
standard_tests.addTests(loader.loadTestsFromTestCase(
check_module_enabled.UnitTesting))
standard_tests.addTests(loader.loadTestsFromTestCase(
check_module_numpy.UnitTesting))
standard_tests.addTests(loader.loadTestsFromTestCase(
check_module_requests.UnitTesting))
standard_tests.addTests(loader.loadTestsFromTestCase(
check_static_binaries.UnitTesting))
return standard_tests
def main():
# Slice command line arguments by '--'
unittest_args, parser_args = sliceCommandLineArguments()
# Construct and run unit tests.
unittest.main(argv=unittest_args)
if __name__ == "__main__":
main()
```
#### File: qtcreator/externaltools/qtc_doxy_file.py
```python
import sys
import os
import subprocess
import tempfile
def find_gitroot(filepath_reference):
path = filepath_reference
path_prev = ""
while not os.path.exists(os.path.join(path, ".git")) and path != path_prev:
path_prev = path
path = os.path.dirname(path)
return path
doxyfile, sourcefile = sys.argv[-2:]
doxyfile = os.path.join(find_gitroot(sourcefile), doxyfile)
os.chdir(os.path.dirname(doxyfile))
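# Copy the project Doxyfile into a temporary file and append an INPUT override,
# so doxygen only processes the source file currently open in Qt Creator.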
tempfile = tempfile.NamedTemporaryFile(mode='w+b')
doxyfile_tmp = tempfile.name
tempfile.write(open(doxyfile, "r+b").read())
tempfile.write(b'\n\n')
tempfile.write(b'INPUT=' + os.fsencode(sourcefile) + b'\n')
tempfile.flush()
subprocess.call(("doxygen", doxyfile_tmp))
del tempfile
# Maybe handy, but also annoying?
if "--browse" in sys.argv:
import webbrowser
webbrowser.open("html/files.html")
```
#### File: tests/python/bl_pyapi_mathutils.py
```python
import unittest
from mathutils import Matrix, Vector, Quaternion
from mathutils import kdtree
import math
# keep globals immutable
vector_data = (
(1.0, 0.0, 0.0),
(0.0, 1.0, 0.0),
(0.0, 0.0, 1.0),
(1.0, 1.0, 1.0),
(0.33783, 0.715698, -0.611206),
(-0.944031, -0.326599, -0.045624),
(-0.101074, -0.416443, -0.903503),
(0.799286, 0.49411, -0.341949),
(-0.854645, 0.518036, 0.033936),
(0.42514, -0.437866, -0.792114),
(-0.358948, 0.597046, 0.717377),
    (-0.985413, 0.144714, 0.089294),
)
# get data at different scales
vector_data = sum(
(tuple(tuple(a * scale for a in v) for v in vector_data)
for scale in (s * sign for s in (0.0001, 0.1, 1.0, 10.0, 1000.0, 100000.0)
for sign in (1.0, -1.0))), ()) + ((0.0, 0.0, 0.0),)
class MatrixTesting(unittest.TestCase):
def test_matrix_column_access(self):
        # mat is built from four rows of length 3:
        # [ 1  11  111 ]
        # [ 2  22  222 ]
        # [ 3  33  333 ]
        # [ 4  44  444 ]
mat = Matrix(((1, 11, 111),
(2, 22, 222),
(3, 33, 333),
(4, 44, 444)))
self.assertEqual(mat[0], Vector((1, 11, 111)))
self.assertEqual(mat[1], Vector((2, 22, 222)))
self.assertEqual(mat[2], Vector((3, 33, 333)))
self.assertEqual(mat[3], Vector((4, 44, 444)))
def test_item_access(self):
args = ((1, 4, 0, -1),
(2, -1, 2, -2),
(0, 3, 8, 3),
(-2, 9, 1, 0))
mat = Matrix(args)
for row in range(4):
for col in range(4):
self.assertEqual(mat[row][col], args[row][col])
self.assertEqual(mat[0][2], 0)
self.assertEqual(mat[3][1], 9)
self.assertEqual(mat[2][3], 3)
self.assertEqual(mat[0][0], 1)
self.assertEqual(mat[3][3], 0)
def test_item_assignment(self):
mat = Matrix() - Matrix()
indices = (0, 0), (1, 3), (2, 0), (3, 2), (3, 1)
checked_indices = []
for row, col in indices:
mat[row][col] = 1
for row in range(4):
for col in range(4):
if mat[row][col]:
checked_indices.append((row, col))
for item in checked_indices:
self.assertIn(item, indices)
def test_matrix_to_3x3(self):
#mat =
#[ 1 2 3 4 ]
#[ 2 4 6 8 ]
#[ 3 6 9 12 ]
#[ 4 8 12 16 ]
mat = Matrix(tuple((i, 2 * i, 3 * i, 4 * i) for i in range(1, 5)))
mat_correct = Matrix(((1, 2, 3), (2, 4, 6), (3, 6, 9)))
self.assertEqual(mat.to_3x3(), mat_correct)
def test_matrix_to_translation(self):
mat = Matrix()
mat[0][3] = 1
mat[1][3] = 2
mat[2][3] = 3
self.assertEqual(mat.to_translation(), Vector((1, 2, 3)))
def test_matrix_translation(self):
mat = Matrix()
mat.translation = Vector((1, 2, 3))
self.assertEqual(mat[0][3], 1)
self.assertEqual(mat[1][3], 2)
self.assertEqual(mat[2][3], 3)
def test_non_square_mult(self):
mat1 = Matrix(((1, 2, 3),
(4, 5, 6)))
mat2 = Matrix(((1, 2),
(3, 4),
(5, 6)))
prod_mat1 = Matrix(((22, 28),
(49, 64)))
prod_mat2 = Matrix(((9, 12, 15),
(19, 26, 33),
(29, 40, 51)))
self.assertEqual(mat1 * mat2, prod_mat1)
self.assertEqual(mat2 * mat1, prod_mat2)
def test_mat4x4_vec3D_mult(self):
mat = Matrix(((1, 0, 2, 0),
(0, 6, 0, 0),
(0, 0, 1, 1),
(0, 0, 0, 1)))
vec = Vector((1, 2, 3))
prod_mat_vec = Vector((7, 12, 4))
prod_vec_mat = Vector((1, 12, 5))
self.assertEqual(mat * vec, prod_mat_vec)
self.assertEqual(vec * mat, prod_vec_mat)
def test_mat_vec_mult(self):
mat1 = Matrix()
vec = Vector((1, 2))
self.assertRaises(ValueError, mat1.__mul__, vec)
self.assertRaises(ValueError, vec.__mul__, mat1)
mat2 = Matrix(((1, 2),
(-2, 3)))
prod = Vector((5, 4))
self.assertEqual(mat2 * vec, prod)
def test_matrix_inverse(self):
mat = Matrix(((1, 4, 0, -1),
(2, -1, 2, -2),
(0, 3, 8, 3),
(-2, 9, 1, 0)))
inv_mat = (1 / 285) * Matrix(((195, -57, 27, -102),
(50, -19, 4, 6),
(-60, 57, 18, 27),
(110, -133, 43, -78)))
self.assertEqual(mat.inverted(), inv_mat)
def test_matrix_inverse_safe(self):
mat = Matrix(((1, 4, 0, -1),
(2, -1, 0, -2),
(0, 3, 0, 3),
(-2, 9, 0, 0)))
# Warning, if we change epsilon in py api we have to update this!!!
epsilon = 1e-8
inv_mat_safe = mat.copy()
inv_mat_safe[0][0] += epsilon
inv_mat_safe[1][1] += epsilon
inv_mat_safe[2][2] += epsilon
inv_mat_safe[3][3] += epsilon
inv_mat_safe.invert()
'''
inv_mat_safe = Matrix(((1.0, -0.5, 0.0, -0.5),
(0.222222, -0.111111, -0.0, 0.0),
(-333333344.0, 316666656.0, 100000000.0, 150000000.0),
(0.888888, -0.9444444, 0.0, -0.5)))
'''
self.assertEqual(mat.inverted_safe(), inv_mat_safe)
def test_matrix_mult(self):
mat = Matrix(((1, 4, 0, -1),
(2, -1, 2, -2),
(0, 3, 8, 3),
(-2, 9, 1, 0)))
prod_mat = Matrix(((11, -9, 7, -9),
(4, -3, 12, 6),
(0, 48, 73, 18),
(16, -14, 26, -13)))
self.assertEqual(mat * mat, prod_mat)
class VectorTesting(unittest.TestCase):
def test_orthogonal(self):
angle_90d = math.pi / 2.0
for v in vector_data:
v = Vector(v)
if v.length_squared != 0.0:
self.assertAlmostEqual(v.angle(v.orthogonal()), angle_90d)
class QuaternionTesting(unittest.TestCase):
def test_to_expmap(self):
q = Quaternion((0, 0, 1), math.radians(90))
e = q.to_exponential_map()
self.assertAlmostEqual(e.x, 0)
self.assertAlmostEqual(e.y, 0)
self.assertAlmostEqual(e.z, math.radians(90), 6)
def test_expmap_axis_normalization(self):
q = Quaternion((1, 1, 0), 2)
e = q.to_exponential_map()
self.assertAlmostEqual(e.x, 2 * math.sqrt(0.5), 6)
self.assertAlmostEqual(e.y, 2 * math.sqrt(0.5), 6)
self.assertAlmostEqual(e.z, 0)
def test_from_expmap(self):
e = Vector((1, 1, 0))
q = Quaternion(e)
axis, angle = q.to_axis_angle()
self.assertAlmostEqual(angle, math.sqrt(2), 6)
self.assertAlmostEqual(axis.x, math.sqrt(0.5), 6)
self.assertAlmostEqual(axis.y, math.sqrt(0.5), 6)
self.assertAlmostEqual(axis.z, 0)
class KDTreeTesting(unittest.TestCase):
@staticmethod
def kdtree_create_grid_3d_data(tot):
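        # Yield ((x, y, z), index) pairs for a tot * tot * tot grid spanning [0, 1] on each axis.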
index = 0
mul = 1.0 / (tot - 1)
for x in range(tot):
for y in range(tot):
for z in range(tot):
yield (x * mul, y * mul, z * mul), index
index += 1
@staticmethod
def kdtree_create_grid_3d(tot, *, filter_fn=None):
k = kdtree.KDTree(tot * tot * tot)
for co, index in KDTreeTesting.kdtree_create_grid_3d_data(tot):
if (filter_fn is not None) and (not filter_fn(co, index)):
continue
k.insert(co, index)
k.balance()
return k
def assertAlmostEqualVector(self, first, second, places=7, msg=None, delta=None):
self.assertAlmostEqual(first[0], second[0], places=places, msg=msg, delta=delta)
self.assertAlmostEqual(first[1], second[1], places=places, msg=msg, delta=delta)
self.assertAlmostEqual(first[2], second[2], places=places, msg=msg, delta=delta)
def test_kdtree_single(self):
co = (0,) * 3
index = 2
k = kdtree.KDTree(1)
k.insert(co, index)
k.balance()
co_found, index_found, dist_found = k.find(co)
self.assertEqual(tuple(co_found), co)
self.assertEqual(index_found, index)
self.assertEqual(dist_found, 0.0)
def test_kdtree_empty(self):
co = (0,) * 3
k = kdtree.KDTree(0)
k.balance()
co_found, index_found, dist_found = k.find(co)
self.assertIsNone(co_found)
self.assertIsNone(index_found)
self.assertIsNone(dist_found)
def test_kdtree_line(self):
tot = 10
k = kdtree.KDTree(tot)
for i in range(tot):
k.insert((i,) * 3, i)
k.balance()
co_found, index_found, dist_found = k.find((-1,) * 3)
self.assertEqual(tuple(co_found), (0,) * 3)
co_found, index_found, dist_found = k.find((tot,) * 3)
self.assertEqual(tuple(co_found), (tot - 1,) * 3)
def test_kdtree_grid(self):
size = 10
k = self.kdtree_create_grid_3d(size)
# find_range
ret = k.find_range((0.5,) * 3, 2.0)
self.assertEqual(len(ret), size * size * size)
ret = k.find_range((1.0,) * 3, 1.0 / size)
self.assertEqual(len(ret), 1)
ret = k.find_range((1.0,) * 3, 2.0 / size)
self.assertEqual(len(ret), 8)
ret = k.find_range((10,) * 3, 0.5)
self.assertEqual(len(ret), 0)
# find_n
tot = 0
ret = k.find_n((1.0,) * 3, tot)
self.assertEqual(len(ret), tot)
tot = 10
ret = k.find_n((1.0,) * 3, tot)
self.assertEqual(len(ret), tot)
self.assertEqual(ret[0][2], 0.0)
tot = size * size * size
ret = k.find_n((1.0,) * 3, tot)
self.assertEqual(len(ret), tot)
def test_kdtree_grid_filter_simple(self):
size = 10
k = self.kdtree_create_grid_3d(size)
# filter exact index
ret_regular = k.find((1.0,) * 3)
ret_filter = k.find((1.0,) * 3, filter=lambda i: i == ret_regular[1])
self.assertEqual(ret_regular, ret_filter)
ret_filter = k.find((-1.0,) * 3, filter=lambda i: i == ret_regular[1])
self.assertEqual(ret_regular[:2], ret_filter[:2]) # ignore distance
def test_kdtree_grid_filter_pairs(self):
size = 10
k_all = self.kdtree_create_grid_3d(size)
k_odd = self.kdtree_create_grid_3d(size, filter_fn=lambda co, i: (i % 2) == 1)
k_evn = self.kdtree_create_grid_3d(size, filter_fn=lambda co, i: (i % 2) == 0)
samples = 5
mul = 1 / (samples - 1)
for x in range(samples):
for y in range(samples):
for z in range(samples):
co = (x * mul, y * mul, z * mul)
ret_regular = k_odd.find(co)
self.assertEqual(ret_regular[1] % 2, 1)
ret_filter = k_all.find(co, lambda i: (i % 2) == 1)
self.assertAlmostEqualVector(ret_regular, ret_filter)
ret_regular = k_evn.find(co)
self.assertEqual(ret_regular[1] % 2, 0)
ret_filter = k_all.find(co, lambda i: (i % 2) == 0)
self.assertAlmostEqualVector(ret_regular, ret_filter)
# filter out all values (search odd tree for even values and the reverse)
co = (0,) * 3
ret_filter = k_odd.find(co, lambda i: (i % 2) == 0)
self.assertEqual(ret_filter[1], None)
ret_filter = k_evn.find(co, lambda i: (i % 2) == 1)
self.assertEqual(ret_filter[1], None)
def test_kdtree_invalid_size(self):
with self.assertRaises(ValueError):
kdtree.KDTree(-1)
def test_kdtree_invalid_balance(self):
co = (0,) * 3
index = 2
k = kdtree.KDTree(2)
k.insert(co, index)
k.balance()
k.insert(co, index)
with self.assertRaises(RuntimeError):
k.find(co)
def test_kdtree_invalid_filter(self):
k = kdtree.KDTree(1)
k.insert((0,) * 3, 0)
k.balance()
# not callable
with self.assertRaises(TypeError):
k.find((0,) * 3, filter=None)
# no args
with self.assertRaises(TypeError):
k.find((0,) * 3, filter=lambda: None)
# bad return value
with self.assertRaises(ValueError):
k.find((0,) * 3, filter=lambda i: None)
if __name__ == '__main__':
import sys
sys.argv = [__file__] + (sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [])
unittest.main()
``` |
{
"source": "1-MillionParanoidTterabytes/blender-addons-master",
"score": 2
} |
#### File: 1-MillionParanoidTterabytes/blender-addons-master/development_edit_operator.py
```python
bl_info = {
"name": "Edit Operator Source",
"author": "scorpion81",
"version": (1, 2, 2),
"blender": (2, 78, 0),
"location": "Text Editor > Edit > Edit Operator",
"description": "Opens source file of chosen operator, if it is an add-on one",
"warning": "",
"wiki_url": "https://wiki.blender.org/index.php/Extensions:2.6/"
"Py/Scripts/Development/Edit_Operator_Source",
"category": "Development"}
import bpy
import sys
import inspect
from bpy.types import (
Operator,
Panel,
)
from bpy.props import EnumProperty
def get_py_class_from_op(opname):
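    # Resolve an operator id such as "text.edit_operator" to its registered
    # Python class, or None for operators implemented in C.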
opid = opname.split(".")
opmod = getattr(bpy.ops, opid[0])
op = getattr(opmod, opid[1])
id = op.get_rna().bl_rna.identifier
# C operators won't be added
return getattr(bpy.types, id, None)
def getmodule(opname):
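    # Return (module path, source line, is_addon) for the class behind the operator.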
cls = get_py_class_from_op(opname)
if cls is None:
addon = False
line = -1
mod = 'C operator'
else:
addon = True
mod_name = cls.__module__
try:
line = inspect.getsourcelines(cls)[1]
except IOError:
line = -1
except TypeError:
line = -1
if mod_name == 'bpy.types':
            addon = False
            mod = mod_name  # avoid returning an unbound 'mod' for bpy.types classes
elif mod_name != '__main__':
mod = sys.modules[mod_name].__file__
else:
addon = False
mod = mod_name
return mod, line, addon
def get_ops():
allops = []
opsdir = dir(bpy.ops)
for opmodname in opsdir:
opmod = getattr(bpy.ops, opmodname)
opmoddir = dir(opmod)
for o in opmoddir:
name = opmodname + "." + o
cls = get_py_class_from_op(name)
if cls is not None:
allops.append(name)
del opmoddir
    # add our own operator name too, since it's not loaded yet when this is called
allops.append("text.edit_operator")
l = sorted(allops)
del allops
del opsdir
return [(y, y, "", x) for x, y in enumerate(l)]
class EditOperator(Operator):
bl_idname = "text.edit_operator"
bl_label = "Edit Operator"
bl_description = "Opens the source file of operators chosen from Menu"
bl_property = "op"
items = get_ops()
op = EnumProperty(
name="Op",
description="",
items=items
)
def invoke(self, context, event):
context.window_manager.invoke_search_popup(self)
return {'PASS_THROUGH'}
def execute(self, context):
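        # Look up the source file and line of the chosen operator and jump there,
        # opening the file in the Text Editor if it is not already loaded.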
found = False
path, line, addon = getmodule(self.op)
if addon:
for t in bpy.data.texts:
if t.filepath == path:
ctx = context.copy()
ctx['edit_text'] = t
bpy.ops.text.jump(ctx, line=line)
found = True
break
if (found is False):
self.report({'INFO'},
"Opened file: " + path)
bpy.ops.text.open(filepath=path)
bpy.ops.text.jump(line=line)
return {'FINISHED'}
else:
self.report({'WARNING'},
"Found no source file for " + self.op)
return {'CANCELLED'}
class EditOperatorPanel(Panel):
bl_idname = "DEVEDIT_PT_operator"
bl_space_type = 'TEXT_EDITOR'
bl_region_type = 'UI'
bl_label = "Edit Operator"
def draw(self, context):
layout = self.layout
layout.operator("text.edit_operator")
def register():
bpy.utils.register_class(EditOperator)
bpy.utils.register_class(EditOperatorPanel)
def unregister():
bpy.utils.unregister_class(EditOperatorPanel)
bpy.utils.unregister_class(EditOperator)
if __name__ == "__main__":
register()
```
#### File: blender-addons-master/materials_utils/material_converter.py
```python
import bpy
import math
from mathutils import Vector
from bpy.types import Operator
from .warning_messages_utils import (
warning_messages,
c_is_cycles_addon_enabled,
c_data_has_materials,
collect_report,
)
# -----------------------------------------------------------------------------
# Globals
nodesDictionary = None
NODE_FRAME = 'NodeFrame'
BI_MATERIAL_NODE = 'ShaderNodeMaterial'
BI_OUTPUT_NODE = 'ShaderNodeOutput'
TEXTURE_IMAGE_NODE = 'ShaderNodeTexImage'
OUTPUT_NODE = 'ShaderNodeOutputMaterial'
RGB_MIX_NODE = 'ShaderNodeMixRGB'
MAPPING_NODE = 'ShaderNodeMapping'
NORMAL_MAP_NODE = 'ShaderNodeNormalMap'
SHADER_MIX_NODE = 'ShaderNodeMixShader'
SHADER_ADD_NODE = 'ShaderNodeAddShader'
COORD_NODE = 'ShaderNodeTexCoord'
RGB_TO_BW_NODE = 'ShaderNodeRGBToBW'
BSDF_DIFFUSE_NODE = 'ShaderNodeBsdfDiffuse'
BSDF_EMISSION_NODE = 'ShaderNodeEmission'
BSDF_TRANSPARENT_NODE = 'ShaderNodeBsdfTransparent'
BSDF_GLOSSY_NODE = 'ShaderNodeBsdfGlossy'
BSDF_GLASS_NODE = 'ShaderNodeBsdfGlass'
textureNodeSizeX = 150
textureNodeSizeY = 350
# -----------------------------------------------------------------------------
# Functions
def makeTextureNodeDict(cmat):
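    # Map every image texture used by the material's texture slots to a freshly
    # created Image Texture node, so the later conversion passes reuse the same node.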
global nodesDictionary
nodesDictionary = {}
textures = {textureSlot.texture for textureSlot in cmat.texture_slots if textureSlot}
for tex in textures:
texNode = None
if tex.type == 'IMAGE':
texNode = makeNodeUsingImage1(cmat, tex)
if texNode:
nodesDictionary[tex] = texNode
return nodesDictionary
def getTexNodeDic(texture):
return nodesDictionary.get(texture)
def clearNodes(TreeNodes):
TreeNodes.nodes.clear()
def clearCycleMaterial(cmat):
TreeNodes = cmat.node_tree
clearNodes(TreeNodes)
def copyMapping(textureSlot, textureMapping):
textureMapping.scale.x = textureSlot.scale.x
textureMapping.scale.y = textureSlot.scale.y
textureMapping.scale.z = textureSlot.scale.z
def addRGBMixNode(TreeNodes, textureSlot, mixRgbNode, prevTexNode, newTexNode, nodeType, textureIdx):
try:
links = TreeNodes.links
mixRgbNode.name = '{} Mix {:d}'.format(nodeType, textureIdx)
mixRgbNode.blend_type = textureSlot.blend_type
mixRgbNode.inputs['Fac'].default_value = textureSlot.diffuse_color_factor
links.new(prevTexNode.outputs['Color'], mixRgbNode.inputs['Color2'])
links.new(newTexNode.outputs['Color'], mixRgbNode.inputs['Color1'])
except:
collect_report("ERROR: Failure to find link with a Mix node")
def makeBiNodes(cmat):
# Create Blender Internal Material Nodes
TreeNodes = cmat.node_tree
links = TreeNodes.links
BIFrame = TreeNodes.nodes.new(NODE_FRAME)
BIFrame.name = 'BI Frame'
BIFrame.label = 'BI Material'
biShaderNodeMaterial = TreeNodes.nodes.new(BI_MATERIAL_NODE)
biShaderNodeMaterial.parent = BIFrame
biShaderNodeMaterial.name = 'BI Material'
biShaderNodeMaterial.material = cmat
biShaderNodeMaterial.location = 0, 600
biShaderNodeOutput = TreeNodes.nodes.new(BI_OUTPUT_NODE)
biShaderNodeOutput.parent = BIFrame
biShaderNodeOutput.name = 'BI Output'
biShaderNodeOutput.location = 200, 600
try:
links.new(biShaderNodeMaterial.outputs['Color'], biShaderNodeOutput.inputs['Color'])
links.new(biShaderNodeMaterial.outputs['Alpha'], biShaderNodeOutput.inputs['Alpha'])
except:
collect_report("ERROR: Failure to find links with the BI Shader Material")
def placeNode(node, posX, posY, deltaX, deltaY, countX, countY):
nodeX = posX - (deltaX * countX)
nodeY = posY - (deltaY * countY)
node.location = nodeX, nodeY
def makeImageTextureNode(TreeNodes, img):
texNode = TreeNodes.nodes.new(TEXTURE_IMAGE_NODE)
texNode.image = img
return texNode
def makeNodeUsingImage1(cmat, texture):
TreeNodes = cmat.node_tree
img = texture.image
texNode = makeImageTextureNode(TreeNodes, img)
return texNode
def makeMainShader(TreeNodes):
mainShader = TreeNodes.nodes.new(BSDF_DIFFUSE_NODE)
mainShader.name = 'Diffuse BSDF'
mainShader.location = 0, 0
return mainShader
def makeEmissionShader(TreeNodes):
mainShader = TreeNodes.nodes.new(BSDF_EMISSION_NODE)
    mainShader.name = 'Emission'
mainShader.location = 0, 0
return mainShader
def makeMaterialOutput(TreeNodes):
shout = TreeNodes.nodes.new(OUTPUT_NODE)
shout.location = 200, 0
return shout
def replaceNode(oldNode, newNode):
newNode.location = oldNode.location
    try:
        # links.new() lives on the node tree, so fetch it from the node being replaced
        links = oldNode.id_data.links
        for link in oldNode.outputs['BSDF'].links:
            links.new(newNode.outputs['BSDF'], link.to_socket)
        for link in oldNode.inputs['Color'].links:
            links.new(link.from_socket, newNode.inputs['Color'])
        for link in oldNode.inputs['Normal'].links:
            links.new(link.from_socket, newNode.inputs['Normal'])
except:
collect_report("ERROR: Failure to replace node")
def BIToCycleTexCoord(links, textureSlot, texCoordNode, textureMappingNode):
# Texture Coordinates
linkOutput = None
if textureSlot.texture_coords in {'TANGENT', 'STRESS', 'STRAND'}:
linkOutput = None
elif textureSlot.texture_coords == 'REFLECTION':
linkOutput = 'Reflection'
elif textureSlot.texture_coords == 'NORMAL':
linkOutput = 'Normal'
elif textureSlot.texture_coords == 'WINDOW':
linkOutput = 'Window'
elif textureSlot.texture_coords == 'UV':
linkOutput = 'UV'
elif textureSlot.texture_coords == 'ORCO':
linkOutput = 'Generated'
elif textureSlot.texture_coords == 'OBJECT':
linkOutput = 'Object'
elif textureSlot.texture_coords == 'GLOBAL':
linkOutput = 'Camera'
if linkOutput:
links.new(texCoordNode.outputs[linkOutput], textureMappingNode.inputs['Vector'])
def createDiffuseNodes(cmat, texCoordNode, mainShader, materialOutput):
TreeNodes = cmat.node_tree
links = TreeNodes.links
texCount = len([node for node in TreeNodes.nodes if node.type == 'MAPPING'])
currPosY = -textureNodeSizeY * texCount
textureSlots = [textureSlot for textureSlot in cmat.texture_slots if
(textureSlot and textureSlot.use_map_color_diffuse)]
texCount = len(textureSlots)
texNode = None
latestNode = None
groupName = 'Diffuse'
if any(textureSlots):
diffuseFrame = TreeNodes.nodes.new(NODE_FRAME)
diffuseFrame.name = '{} Frame'.format(groupName)
diffuseFrame.label = '{}'.format(groupName)
for textureIdx, textureSlot in enumerate(textureSlots):
texNode = getTexNodeDic(textureSlot.texture)
if texNode:
tex_node_name = getattr(texNode.image, "name", "")
collect_report("INFO: Generating {} Nodes for: ".format(groupName) + tex_node_name)
texNode.parent = diffuseFrame
placeNode(texNode, -500 - ((texCount - 1) * 200),
currPosY, textureNodeSizeX, textureNodeSizeY, 0, textureIdx)
# Add mapping node
textureMapping = TreeNodes.nodes.new(MAPPING_NODE)
textureMapping.parent = diffuseFrame
renameNode(textureMapping, '{} Mapping'.format(groupName), texCount, textureIdx)
textureMapping.location = texNode.location + Vector((-400, 0))
copyMapping(textureSlot, textureMapping)
# Texture Coordinates
BIToCycleTexCoord(links, textureSlot, texCoordNode, textureMapping)
# Place the texture node
renameNode(texNode, '{} Texture'.format(groupName), texCount, textureIdx)
links.new(textureMapping.outputs['Vector'], texNode.inputs['Vector'])
# Add multiply node
colorMult = TreeNodes.nodes.new(RGB_MIX_NODE)
colorMult.parent = diffuseFrame
renameNode(colorMult, 'Color Mult', texCount, textureIdx)
colorMult.blend_type = 'MIX'
colorMult.inputs['Fac'].default_value = 1
colorMult.inputs['Color1'].default_value = (1, 1, 1, 1)
colorMult.location = texNode.location + Vector((200, 0))
links.new(texNode.outputs['Color'], colorMult.inputs['Color2'])
texNode = colorMult
if textureSlot.use and textureIdx == 0:
latestNode = texNode
if textureSlot.use and textureIdx > 0:
try:
# Create a node to mix multiple texture nodes
mixRgbNode = TreeNodes.nodes.new(RGB_MIX_NODE)
mixRgbNode.parent = diffuseFrame
addRGBMixNode(TreeNodes, textureSlot, mixRgbNode, texNode, latestNode,
'{}'.format(groupName), textureIdx)
mixRgbNode.location = Vector(
(max(texNode.location.x, latestNode.location.x),
(texNode.location.y + latestNode.location.y) / 2)) + Vector((200, 0)
)
latestNode = mixRgbNode
except:
continue
if latestNode:
links.new(latestNode.outputs['Color'], mainShader.inputs['Color'])
# Y Position next texture node
currPosY = currPosY - (textureNodeSizeY * (texCount))
# BI Material to Cycles - Alpha Transparency
textureSlots = [textureSlot for textureSlot in cmat.texture_slots if
(textureSlot and textureSlot.use_map_alpha)]
texCount = len(textureSlots)
texNode = None
latestNode = None
for textureIdx, textureSlot in enumerate(textureSlots):
texNode = getTexNodeDic(textureSlot.texture)
if texNode:
tex_node_name = getattr(texNode.image, "name", "")
collect_report("INFO: Generating Transparency Nodes for: " + tex_node_name)
if textureSlot.use and textureIdx == 0:
latestNode = texNode
if textureSlot.use and textureIdx > 0:
try:
# Create a node to mix multiple texture nodes
mixAlphaNode = TreeNodes.nodes.new(RGB_MIX_NODE)
mixAlphaNode.name = 'Alpha Mix {:d}'.format(textureIdx)
mixAlphaNode.blend_type = textureSlot.blend_type
mixAlphaNode.inputs['Fac'].default_value = textureSlot.diffuse_color_factor
placeNode(mixAlphaNode, -200 - ((texCount - textureIdx - 1) * 200), 400 - 240,
textureNodeSizeX, textureNodeSizeY, 0, 0)
links.new(texNode.outputs['Alpha'], mixAlphaNode.inputs['Color2'])
links.new(latestNode.outputs['Alpha'], mixAlphaNode.inputs['Color1'])
latestNode = mixAlphaNode
except:
continue
if latestNode:
alphaMixShader = TreeNodes.nodes.get('Alpha Mix Shader')
if alphaMixShader:
if latestNode.type == 'TEX_IMAGE':
outputLink = 'Alpha'
else:
outputLink = 'Color'
links.new(latestNode.outputs[outputLink], alphaMixShader.inputs['Fac'])
def createNormalNodes(cmat, texCoordNode, mainShader, materialOutput):
TreeNodes = cmat.node_tree
links = TreeNodes.links
texCount = len([node for node in TreeNodes.nodes if node.type == 'MAPPING'])
currPosY = -textureNodeSizeY * texCount
textureSlots = [textureSlot for textureSlot in cmat.texture_slots if
(textureSlot and textureSlot.use_map_normal)]
texCount = len(textureSlots)
texNode = None
latestNode = None
groupName = 'Normal'
if any(textureSlots):
normalFrame = TreeNodes.nodes.new(NODE_FRAME)
normalFrame.name = '{} Frame'.format(groupName)
normalFrame.label = '{}'.format(groupName)
for textureIdx, textureSlot in enumerate(textureSlots):
texNode = getTexNodeDic(textureSlot.texture)
if texNode:
tex_node_name = getattr(texNode.image, "name", "")
collect_report("INFO: Generating Normal Nodes for: " + tex_node_name)
texNode.parent = normalFrame
placeNode(texNode, -500 - ((texCount) * 200), currPosY,
textureNodeSizeX, textureNodeSizeY, 0, textureIdx)
# Add mapping node
normalMapping = TreeNodes.nodes.new(MAPPING_NODE)
normalMapping.parent = normalFrame
renameNode(normalMapping, '{} Mapping'.format(groupName), texCount, textureIdx)
normalMapping.location = texNode.location + Vector((-400, 0))
copyMapping(textureSlot, normalMapping)
# Texture Coordinates
BIToCycleTexCoord(links, textureSlot, texCoordNode, normalMapping)
# Place the texture node
renameNode(texNode, '{} Texture'.format(groupName), texCount, textureIdx)
texNode.color_space = 'NONE'
links.new(normalMapping.outputs['Vector'], texNode.inputs['Vector'])
# Add multiply node
normalMult = TreeNodes.nodes.new(RGB_MIX_NODE)
normalMult.parent = normalFrame
renameNode(normalMult, 'Normal Mult', texCount, textureIdx)
normalMult.blend_type = 'MIX'
normalMult.inputs['Fac'].default_value = 1
normalMult.inputs['Color1'].default_value = (.5, .5, 1, 1)
normalMult.location = texNode.location + Vector((200, 0))
links.new(texNode.outputs['Color'], normalMult.inputs['Color2'])
texNode = normalMult
if textureSlot.use and textureIdx == 0:
latestNode = texNode
if textureSlot.use and textureIdx > 0:
try:
# Create a node to mix multiple texture nodes
mixRgbNode = TreeNodes.nodes.new(RGB_MIX_NODE)
mixRgbNode.parent = normalFrame
addRGBMixNode(TreeNodes, textureSlot, mixRgbNode, texNode, latestNode,
'{}'.format(groupName), textureIdx)
mixRgbNode.location = Vector(
(max(texNode.location.x, latestNode.location.x),
(texNode.location.y + latestNode.location.y) / 2)) + Vector((200, 0)
)
latestNode = mixRgbNode
except:
continue
if latestNode:
normalMapNode = TreeNodes.nodes.new(NORMAL_MAP_NODE)
normalMapNode.parent = normalFrame
normalMapNode.location = latestNode.location + Vector((200, 0))
links.new(latestNode.outputs['Color'], normalMapNode.inputs['Color'])
links.new(normalMapNode.outputs['Normal'], mainShader.inputs['Normal'])
def createSpecularNodes(cmat, texCoordNode, mainShader, mainDiffuse, materialOutput):
TreeNodes = cmat.node_tree
links = TreeNodes.links
texCount = len([node for node in TreeNodes.nodes if node.type == 'MAPPING'])
currPosY = -textureNodeSizeY * texCount
textureSlots = [textureSlot for textureSlot in cmat.texture_slots if
(textureSlot and textureSlot.use_map_color_spec)]
texCount = len(textureSlots)
texNode = None
latestNode = None
groupName = 'Specular'
if any(textureSlots):
specularFrame = TreeNodes.nodes.new(NODE_FRAME)
specularFrame.name = '{} Frame'.format(groupName)
specularFrame.label = '{}'.format(groupName)
for textureIdx, textureSlot in enumerate(textureSlots):
texNode = getTexNodeDic(textureSlot.texture)
if texNode:
tex_node_name = getattr(texNode.image, "name", "")
collect_report("INFO: Generating {} Nodes for: ".format(groupName) + tex_node_name)
texNode.parent = specularFrame
placeNode(texNode, -500 - ((texCount) * 200),
currPosY, textureNodeSizeX, textureNodeSizeY, 0, textureIdx)
# Add mapping node
specularMapping = TreeNodes.nodes.new(MAPPING_NODE)
specularMapping.parent = specularFrame
renameNode(specularMapping, '{} Mapping'.format(groupName), texCount, textureIdx)
specularMapping.location = texNode.location + Vector((-400, 0))
copyMapping(textureSlot, specularMapping)
# Texture Coordinates
BIToCycleTexCoord(links, textureSlot, texCoordNode, specularMapping)
# Place the texture node
renameNode(texNode, '{} Texture'.format(groupName), texCount, textureIdx)
links.new(specularMapping.outputs['Vector'], texNode.inputs['Vector'])
# Add multiply node
specularMult = TreeNodes.nodes.new(RGB_MIX_NODE)
specularMult.parent = specularFrame
renameNode(specularMult, 'Specular Mult', texCount, textureIdx)
specularMult.blend_type = 'MULTIPLY'
specularMult.inputs['Fac'].default_value = 1
specularMult.inputs['Color1'].default_value = (1, 1, 1, 1)
specularMult.location = texNode.location + Vector((200, 0))
links.new(texNode.outputs['Color'], specularMult.inputs['Color2'])
texNode = specularMult
if textureSlot.use and textureIdx == 0:
latestNode = texNode
if textureSlot.use and textureIdx > 0:
try:
# Create a node to mix multiple texture nodes
mixRgbNode = TreeNodes.nodes.new(RGB_MIX_NODE)
mixRgbNode.parent = specularFrame
addRGBMixNode(TreeNodes, textureSlot, mixRgbNode, texNode, latestNode,
'{}'.format(groupName), textureIdx)
mixRgbNode.location = Vector(
(max(texNode.location.x, latestNode.location.x),
(texNode.location.y + latestNode.location.y) / 2)) + Vector((200, 0)
)
latestNode = mixRgbNode
except:
continue
if latestNode:
try:
glossShader = TreeNodes.nodes.new(BSDF_GLOSSY_NODE)
RGBToBW = TreeNodes.nodes.new(RGB_TO_BW_NODE)
RGBToBW.location = Vector((0, latestNode.location.y)) + Vector((0, 0))
glossShader.location = Vector((0, latestNode.location.y)) + Vector((0, -80))
links.new(latestNode.outputs['Color'], glossShader.inputs['Color'])
links.new(latestNode.outputs['Color'], RGBToBW.inputs['Color'])
outputNode = TreeNodes.nodes.get('Material Output')
spec_mixer_1 = TreeNodes.nodes.new(SHADER_MIX_NODE)
spec_mixer_1.location = outputNode.location
spec_mixer_2 = TreeNodes.nodes.new(SHADER_MIX_NODE)
spec_mixer_2.inputs['Fac'].default_value = .4
spec_mixer_2.location = outputNode.location + Vector((180, 0))
links.new(spec_mixer_1.outputs['Shader'], spec_mixer_2.inputs[2])
links.new(spec_mixer_2.outputs['Shader'], outputNode.inputs['Surface'])
links.new(RGBToBW.outputs['Val'], spec_mixer_1.inputs['Fac'])
links.new(glossShader.outputs['BSDF'], spec_mixer_1.inputs[2])
outputNode.location += Vector((360, 0))
normalMapNode = TreeNodes.nodes.get('Normal Map')
links.new(normalMapNode.outputs['Normal'], glossShader.inputs['Normal'])
if mainDiffuse.type == 'BSDF_DIFFUSE':
outputLink = 'BSDF'
else:
outputLink = 'Shader'
links.new(mainDiffuse.outputs[outputLink], spec_mixer_1.inputs[1])
links.new(mainDiffuse.outputs[outputLink], spec_mixer_2.inputs[1])
except:
return
def createEmissionNodes(cmat, texCoordNode, mainShader, materialOutput):
TreeNodes = cmat.node_tree
links = TreeNodes.links
texCount = len([node for node in TreeNodes.nodes if node.type == 'MAPPING'])
currPosY = -textureNodeSizeY * texCount
textureSlots = [textureSlot for textureSlot in cmat.texture_slots if
(textureSlot and textureSlot.use_map_emit)]
texCount = len(textureSlots)
texNode = None
latestNode = None
groupName = 'Emission'
if any(textureSlots):
emissionFrame = TreeNodes.nodes.new(NODE_FRAME)
emissionFrame.name = '{} Frame'.format(groupName)
emissionFrame.label = '{}'.format(groupName)
for textureIdx, textureSlot in enumerate(textureSlots):
texNode = getTexNodeDic(textureSlot.texture)
if texNode:
tex_node_name = getattr(texNode.image, "name", "")
collect_report("INFO: Generating {} Nodes for: ".format(groupName) + tex_node_name)
texNode.parent = emissionFrame
placeNode(texNode, -500 - ((texCount) * 200), currPosY,
textureNodeSizeX, textureNodeSizeY, 0, textureIdx)
# Add mapping node
emissionMapping = TreeNodes.nodes.new(MAPPING_NODE)
emissionMapping.parent = emissionFrame
renameNode(emissionMapping, '{} Mapping'.format(groupName), texCount, textureIdx)
emissionMapping.location = texNode.location + Vector((-400, 0))
copyMapping(textureSlot, emissionMapping)
# Texture Coordinates
BIToCycleTexCoord(links, textureSlot, texCoordNode, emissionMapping)
# Place the texture node
renameNode(texNode, '{} Texture'.format(groupName), texCount, textureIdx)
texNode.color_space = 'NONE'
links.new(emissionMapping.outputs['Vector'], texNode.inputs['Vector'])
# Add multiply node
emissionMult = TreeNodes.nodes.new(RGB_MIX_NODE)
emissionMult.parent = emissionFrame
renameNode(emissionMult, 'Emission Mult', texCount, textureIdx)
emissionMult.blend_type = 'MIX'
emissionMult.inputs['Fac'].default_value = 1
emissionMult.inputs['Color1'].default_value = (0, 0, 0, 1)
emissionMult.location = texNode.location + Vector((200, 0))
links.new(texNode.outputs['Color'], emissionMult.inputs['Color2'])
texNode = emissionMult
if textureSlot.use and textureIdx == 0:
latestNode = texNode
if textureSlot.use and textureIdx > 0:
try:
# Create a node to mix multiple texture nodes
mixRgbNode = TreeNodes.nodes.new(RGB_MIX_NODE)
mixRgbNode.parent = emissionFrame
addRGBMixNode(TreeNodes, textureSlot, mixRgbNode, texNode, latestNode,
'{}'.format(groupName), textureIdx)
mixRgbNode.location = Vector(
(max(texNode.location.x, latestNode.location.x),
(texNode.location.y + latestNode.location.y) / 2)) + Vector((200, 0)
)
latestNode = mixRgbNode
except:
continue
if latestNode:
try:
emissionNode = TreeNodes.nodes.new(BSDF_EMISSION_NODE)
emissionNode.inputs['Strength'].default_value = 1
addShaderNode = TreeNodes.nodes.new(SHADER_ADD_NODE)
addShaderNode.location = materialOutput.location + Vector((0, -100))
xPos = mainShader.location.x
yPos = latestNode.location.y
emissionNode.location = Vector((xPos, yPos))
materialOutput.location += Vector((400, 0))
node = materialOutput.inputs[0].links[0].from_node
node.location += Vector((400, 0))
links.new(latestNode.outputs['Color'], emissionNode.inputs['Color'])
links.new(emissionNode.outputs['Emission'], addShaderNode.inputs[1])
links.new(mainShader.outputs['BSDF'], addShaderNode.inputs[0])
links.new(addShaderNode.outputs['Shader'], node.inputs[2])
except:
return
def renameNode(node, baseName, nodesCount, nodeIndex):
if nodesCount == 1:
node.name = baseName
else:
node.name = '{} {:d}'.format(baseName, nodeIndex + 1)
def hasAlphaTex(cmat):
tex_is_transp = False
    for textureSlot in cmat.texture_slots:
        if textureSlot and textureSlot.use and textureSlot.use_map_alpha:
            tex_is_transp = True
return tex_is_transp
def AutoNode(active=False, operator=None):
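    # Convert either the materials of the selected mesh objects (active=True)
    # or every material in the blend file (active=False).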
collect_report("________________________________________", True, False)
collect_report("START CYCLES CONVERSION")
if active:
materials = [mat for obj in bpy.context.selected_objects if
obj.type == 'MESH' for mat in obj.data.materials]
else:
materials = bpy.data.materials
# No Materials for the chosen action - abort
if not materials:
if operator:
if active:
warning_messages(operator, 'CONV_NO_SEL_MAT', override=True)
else:
warning_messages(operator, 'CONV_NO_SC_MAT', override=True)
return
for cmat in materials:
# check for empty material (it will fall through the first check)
test_empty = getattr(cmat, "name", None)
if test_empty is None:
collect_report("INFO: An empty material was hit, skipping")
continue
else:
cmat.use_nodes = True
clearCycleMaterial(cmat)
makeBiNodes(cmat)
makeCyclesFromBI(cmat)
collect_report("Conversion finished !", False, True)
bpy.context.scene.render.engine = 'CYCLES'
def makeCyclesFromBI(cmat):
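    # Build a Cycles node setup approximating the BI material: diffuse and output
    # nodes first, then optional transparency and mirror/glass shaders, followed by
    # the diffuse/normal/specular/emission texture groups.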
mat_name = getattr(cmat, "name", "NO NAME")
collect_report("Converting Material: " + mat_name)
global nodesDictionary
TreeNodes = cmat.node_tree
links = TreeNodes.links
# Convert this material from non-nodes to Cycles nodes
mainShader = None
mainDiffuse = None
Mix_Alpha = None
tex_is_transp = hasAlphaTex(cmat)
cmat_use_transp = cmat.use_transparency and cmat.alpha < 1
cmat_trans_method = cmat.transparency_method
cmat_ior = cmat.raytrace_transparency.ior
cmat_transp_z = cmat_use_transp and cmat_trans_method == 'Z_TRANSPARENCY'
cmat_transp_ray = cmat_use_transp and cmat_trans_method == 'RAYTRACE' and cmat_ior == 1
cmat_mirror = cmat.raytrace_mirror.use
cmat_mirror_fac = cmat.raytrace_mirror.reflect_factor
# Material Shaders
# Diffuse nodes
# --------------------------------------
# Make Diffuse and Output nodes
mainShader = makeMainShader(TreeNodes)
mainShader.inputs['Roughness'].default_value = math.sqrt(max(cmat.specular_intensity, 0.0))
mainDiffuse = mainShader
materialOutput = makeMaterialOutput(TreeNodes)
links.new(mainShader.outputs['BSDF'], materialOutput.inputs['Surface'])
texCoordNode = TreeNodes.nodes.new(COORD_NODE)
texCoordNode.name = 'Texture Coordinate'
# Material Transparent
if not cmat_mirror and cmat_use_transp and tex_is_transp and (cmat_transp_z or cmat_transp_ray):
collect_report("INFO: Make TRANSPARENT material nodes: " + cmat.name)
Mix_Alpha = TreeNodes.nodes.new(SHADER_MIX_NODE)
Mix_Alpha.name = 'Alpha Mix Shader'
Mix_Alpha.location = materialOutput.location
materialOutput.location += Vector((180, 0))
Mix_Alpha.inputs['Fac'].default_value = cmat.alpha
transparentShader = TreeNodes.nodes.new(BSDF_TRANSPARENT_NODE)
transparentShader.location = mainShader.location
mainShader.location += Vector((0, -100))
links.new(transparentShader.outputs['BSDF'], Mix_Alpha.inputs[1])
links.new(mainShader.outputs['BSDF'], Mix_Alpha.inputs[2])
links.new(Mix_Alpha.outputs['Shader'], materialOutput.inputs['Surface'])
mainDiffuse = Mix_Alpha
if cmat_mirror and cmat_mirror_fac > 0.001:
if cmat_use_transp:
# Material Glass
collect_report("INFO: Make GLASS shader node: " + cmat.name)
            newShader = TreeNodes.nodes.new(BSDF_GLASS_NODE)
            # move the main shader's links onto the glass node and make it the main shader
            replaceNode(mainShader, newShader)
            TreeNodes.nodes.remove(mainShader)
            mainShader = newShader
else:
# Material Mirror
collect_report("INFO: Make MIRROR shader node: " + cmat.name)
            newShader = TreeNodes.nodes.new(BSDF_GLOSSY_NODE)
            # move the main shader's links onto the glossy node and make it the main shader
            replaceNode(mainShader, newShader)
            TreeNodes.nodes.remove(mainShader)
            mainShader = newShader
nodesDictionary = makeTextureNodeDict(cmat)
# --------------------------------------
# Texture nodes
# BI Material to Cycles - Diffuse Textures
createDiffuseNodes(cmat, texCoordNode, mainShader, materialOutput)
# BI Material to Cycles - Normal map
createNormalNodes(cmat, texCoordNode, mainShader, materialOutput)
# BI Material to Cycles - Specular map
createSpecularNodes(cmat, texCoordNode, mainShader, mainDiffuse, materialOutput)
# BI Material to Cycles - Emission map
createEmissionNodes(cmat, texCoordNode, mainShader, materialOutput)
# Texture coordinates
    # list all nodes connected to the Texture Coordinate outputs
mappingNodes = [link.to_node for output in texCoordNode.outputs for link in output.links]
mappingNodesCount = len(mappingNodes)
if mappingNodes:
xList = [node.location.x for node in mappingNodes]
yList = [node.location.y for node in mappingNodes]
minPosX = min(xList) - 400
avgPosY = sum(yList) / mappingNodesCount
texCoordNode.location = Vector((minPosX, avgPosY))
# -----------------------------------------------------------------------------
# Operator Classes
class material_convert_all(Operator):
bl_idname = "xps_tools.convert_to_cycles_all"
bl_label = "Convert All Materials"
bl_description = ("Convert All Materials to BI and Cycles Nodes\n"
"Needs saving the .blend file first")
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return (bpy.data.filepath != "" and c_is_cycles_addon_enabled() and
c_data_has_materials())
def execute(self, context):
AutoNode(False, self)
return {'FINISHED'}
class material_convert_selected(Operator):
bl_idname = "xps_tools.convert_to_cycles_selected"
bl_label = "Convert All Materials From Selected Objects"
bl_description = ("Convert All Materials on Selected Objects to BI and Cycles Nodes\n"
"Needs saving the .blend file first")
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
return (bpy.data.filepath != "" and c_data_has_materials() and
c_is_cycles_addon_enabled() and
bool(next((obj for obj in context.selected_objects if obj.type == 'MESH'), None))
)
def execute(self, context):
AutoNode(True, self)
return {'FINISHED'}
def register():
bpy.utils.register_module(__name__)
pass
def unregister():
bpy.utils.unregister_module(__name__)
pass
if __name__ == "__main__":
register()
```
#### File: modules/snap_context/__init__.py
```python
__all__ = (
"SnapContext",
)
import bgl
from mathutils import Vector
VERT = 1
EDGE = 2
FACE = 4
class _Internal:
from .mesh_drawing import (
gpu_Indices_enable_state,
gpu_Indices_restore_state,
gpu_Indices_use_clip_planes,
gpu_Indices_set_ProjectionMatrix,
)
from .utils_projection import (
region_2d_to_orig_and_view_vector,
intersect_boundbox_threshold,
intersect_ray_segment_fac,
project_co_v3,
)
from mathutils.geometry import intersect_line_plane
class _SnapObjectData():
__slots__ = ('data', 'mat')
def __init__(self, data, omat):
self.data = data
self.mat = omat
class SnapContext():
"""
Initializes the snap context with the region and space where the snap objects will be added.
.. note::
After the context has been created, add the objects with the `add_obj` method.
:arg region: region of the 3D viewport, typically bpy.context.region.
:type region: :class:`bpy.types.Region`
:arg space: 3D region data, typically bpy.context.space_data.
:type space: :class:`bpy.types.SpaceView3D`
"""
def __init__(self, region, space):
import gpu
import ctypes
self.freed = False
self.snap_objects = []
self.drawn_count = 0
self._offset_cur = 1 # Starts with index 1
self.region = region
self.rv3d = space.region_3d
if self.rv3d.is_perspective:
self.depth_range = Vector((space.clip_start, space.clip_end))
else:
self.depth_range = Vector((-space.clip_end, space.clip_end))
self.proj_mat = None
self.mval = Vector((0, 0))
self._snap_mode = VERT | EDGE | FACE
self.set_pixel_dist(12)
self._offscreen = gpu.offscreen.new(self.region.width, self.region.height)
self._texture = self._offscreen.color_texture
bgl.glBindTexture(bgl.GL_TEXTURE_2D, self._texture)
NULL = bgl.Buffer(bgl.GL_INT, 1, (ctypes.c_int32 * 1).from_address(0))
bgl.glTexImage2D(bgl.GL_TEXTURE_2D, 0, bgl.GL_R32UI, self.region.width, self.region.height, 0, bgl.GL_RED_INTEGER, bgl.GL_UNSIGNED_INT, NULL)
del NULL
bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MIN_FILTER, bgl.GL_NEAREST)
bgl.glTexParameteri(bgl.GL_TEXTURE_2D, bgl.GL_TEXTURE_MAG_FILTER, bgl.GL_NEAREST)
bgl.glBindTexture(bgl.GL_TEXTURE_2D, 0)
self.winsize = Vector((self._offscreen.width, self._offscreen.height))
## PRIVATE ##
def _get_snap_obj_by_index(self, index):
for snap_obj in self.snap_objects[:self.drawn_count]:
data = snap_obj.data[1]
if index < data.first_index + data.get_tot_elems():
return snap_obj
return None
def _get_nearest_index(self):
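        # Walk outwards from the center of the read-back snap buffer looking for the
        # closest non-zero index; when face snapping is combined with vert/edge
        # snapping, a face hit on an object only raises the offset so that the same
        # object's verts/edges can still win.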
loc = [self._dist_px, self._dist_px]
d = 1
m = self.threshold
max = 2 * m - 1
offset = 1
last_snap_obj = None
r_value = 0
while m < max:
for i in range(2):
while 2 * loc[i] * d < m:
value = int(self._snap_buffer[loc[0]][loc[1]])
loc[i] += d
if value >= offset:
r_value = value
snap_obj = self._get_snap_obj_by_index(r_value)
if self._snap_mode & FACE and self._snap_mode & (VERT | EDGE) and last_snap_obj != snap_obj:
data = snap_obj.data[1]
offset = data.first_index + data.num_tris
last_snap_obj = snap_obj
continue
return snap_obj, r_value
d = -d
m += 4 * self._dist_px * d + 1
return last_snap_obj, r_value
def _get_loc(self, snap_obj, index):
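        # Convert a global element index back into a snapped location: indices are
        # assigned to the object's triangles first, then its edges, then its loose verts.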
index -= snap_obj.data[1].first_index
gpu_data = snap_obj.data[1]
if gpu_data.draw_tris:
if index < snap_obj.data[1].num_tris:
tri_verts = gpu_data.get_tri_verts(index)
tri_co = [snap_obj.mat * Vector(v) for v in gpu_data.get_tri_co(index)]
nor = (tri_co[1] - tri_co[0]).cross(tri_co[2] - tri_co[0])
return _Internal.intersect_line_plane(self.last_ray[1], self.last_ray[1] + self.last_ray[0], tri_co[0], nor), tri_verts
index -= gpu_data.num_tris
if gpu_data.draw_edges:
if index < snap_obj.data[1].num_edges:
edge_verts = gpu_data.get_edge_verts(index)
edge_co = [snap_obj.mat * Vector(v) for v in gpu_data.get_edge_co(index)]
fac = _Internal.intersect_ray_segment_fac(*edge_co, *self.last_ray)
if (self._snap_mode) & VERT and (fac < 0.25 or fac > 0.75):
co = edge_co[0] if fac < 0.5 else edge_co[1]
proj_co = _Internal.project_co_v3(self, co)
dist = self.mval - proj_co
if abs(dist.x) < self._dist_px and abs(dist.y) < self._dist_px:
return co, (edge_verts[0] if fac < 0.5 else edge_verts[1],)
if fac <= 0.0:
co = edge_co[0]
elif fac >= 1.0:
co = edge_co[1]
else:
co = edge_co[0] + fac * (edge_co[1] - edge_co[0])
return co, edge_verts
index -= gpu_data.num_edges
if gpu_data.draw_verts:
if index < snap_obj.data[1].num_verts:
return snap_obj.mat * Vector(gpu_data.get_loosevert_co(index)), (gpu_data.get_loosevert_index(index),)
return None, None
def _get_snap_obj_by_obj(self, obj):
for snap_obj in self.snap_objects:
if obj == snap_obj.data[0]:
return snap_obj
def __del__(self):
if not self.freed:
self._offscreen.free()
# Some objects may still be being referenced
for snap_obj in self.snap_objects:
del snap_obj.data
del snap_obj.mat
del snap_obj
del self.snap_objects
## PUBLIC ##
def update_all(self):
self.drawn_count = 0
self._offset_cur = 1
bgl.glClearColor(0.0, 0.0, 0.0, 0.0)
bgl.glClear(bgl.GL_COLOR_BUFFER_BIT | bgl.GL_DEPTH_BUFFER_BIT)
def update_drawn_snap_object(self, snap_obj):
if len(snap_obj.data) > 1:
del snap_obj.data[1:]
#self.update_all()
# Update on next snap_get call #
self.proj_mat = None
def use_clip_planes(self, value):
_Internal.gpu_Indices_use_clip_planes(self.rv3d, value)
def set_pixel_dist(self, dist_px):
self._dist_px = int(dist_px)
self._dist_px_sq = self._dist_px ** 2
self.threshold = 2 * self._dist_px + 1
self._snap_buffer = bgl.Buffer(bgl.GL_FLOAT, (self.threshold, self.threshold))
def set_snap_mode(self, snap_to_vert, snap_to_edge, snap_to_face):
snap_mode = 0
if snap_to_vert:
snap_mode |= VERT
if snap_to_edge:
snap_mode |= EDGE
if snap_to_face:
snap_mode |= FACE
if snap_mode != self._snap_mode:
self._snap_mode = snap_mode
self.update_all()
def add_obj(self, obj, matrix):
matrix = matrix.copy()
snap_obj = self._get_snap_obj_by_obj(obj)
if not snap_obj:
self.snap_objects.append(_SnapObjectData([obj], matrix))
else:
self.snap_objects.append(_SnapObjectData(snap_obj.data, matrix))
return self.snap_objects[-1]
def get_ray(self, mval):
self.last_ray = _Internal.region_2d_to_orig_and_view_vector(self.region, self.rv3d, mval)
return self.last_ray
def snap_get(self, mval):
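        # Render the element indices of the candidate objects into the offscreen
        # buffer, read back a small window around the mouse position and return
        # (snap_obj, location, element_indices) for the nearest hit.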
ret = None, None
self.mval[:] = mval
snap_vert = self._snap_mode & VERT != 0
snap_edge = self._snap_mode & EDGE != 0
snap_face = self._snap_mode & FACE != 0
_Internal.gpu_Indices_enable_state()
self._offscreen.bind()
#bgl.glDisable(bgl.GL_DITHER) # dithering and AA break color coding, so disable #
#multisample_enabled = bgl.glIsEnabled(bgl.GL_MULTISAMPLE)
#bgl.glDisable(bgl.GL_MULTISAMPLE)
bgl.glEnable(bgl.GL_DEPTH_TEST)
proj_mat = self.rv3d.perspective_matrix.copy()
if self.proj_mat != proj_mat:
self.proj_mat = proj_mat
_Internal.gpu_Indices_set_ProjectionMatrix(self.proj_mat)
self.update_all()
ray_dir, ray_orig = self.get_ray(mval)
for i, snap_obj in enumerate(self.snap_objects[self.drawn_count:], self.drawn_count):
obj = snap_obj.data[0]
bbmin = Vector(obj.bound_box[0])
bbmax = Vector(obj.bound_box[6])
if bbmin != bbmax:
MVP = proj_mat * snap_obj.mat
mat_inv = snap_obj.mat.inverted()
ray_orig_local = mat_inv * ray_orig
ray_dir_local = mat_inv.to_3x3() * ray_dir
in_threshold = _Internal.intersect_boundbox_threshold(self, MVP, ray_orig_local, ray_dir_local, bbmin, bbmax)
else:
proj_co = _Internal.project_co_v3(self, snap_obj.mat.translation)
dist = self.mval - proj_co
in_threshold = abs(dist.x) < self._dist_px and abs(dist.y) < self._dist_px
#snap_obj.data[1] = primitive_point
if in_threshold:
if len(snap_obj.data) == 1:
from .mesh_drawing import GPU_Indices_Mesh
snap_obj.data.append(GPU_Indices_Mesh(obj, snap_face, snap_edge, snap_vert))
snap_obj.data[1].set_draw_mode(snap_face, snap_edge, snap_vert)
snap_obj.data[1].set_ModelViewMatrix(snap_obj.mat)
snap_obj.data[1].Draw(self._offset_cur)
self._offset_cur += snap_obj.data[1].get_tot_elems()
self.snap_objects[self.drawn_count], self.snap_objects[i] = self.snap_objects[i], self.snap_objects[self.drawn_count]
self.drawn_count += 1
bgl.glReadBuffer(bgl.GL_COLOR_ATTACHMENT0)
bgl.glReadPixels(
int(self.mval[0]) - self._dist_px, int(self.mval[1]) - self._dist_px,
self.threshold, self.threshold, bgl.GL_RED_INTEGER, bgl.GL_UNSIGNED_INT, self._snap_buffer)
bgl.glReadBuffer(bgl.GL_BACK)
snap_obj, index = self._get_nearest_index()
#print(index)
if snap_obj:
ret = self._get_loc(snap_obj, index)
self._offscreen.unbind()
_Internal.gpu_Indices_restore_state()
return snap_obj, ret[0], ret[1]
def free(self):
self.__del__()
self.freed = True
```
#### File: modules/snap_context/mesh_drawing.py
```python
import bgl
import bmesh
import numpy as np
from mathutils import Matrix
from .utils_shader import Shader
def load_shader(shadername):
from os import path
with open(path.join(path.dirname(__file__), 'resources', shadername), 'r') as f:
return f.read()
def gl_buffer_void_as_long(value):
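    # Wrap a raw memory address in a 1-byte bgl.Buffer so it can be handed to bgl
    # calls that expect a buffer (used here as a NULL/offset pointer).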
import ctypes
a = (ctypes.c_byte * 1).from_address(value)
return bgl.Buffer(bgl.GL_BYTE, 1, a)
def get_mesh_vert_co_array(me):
tot_vco = len(me.vertices)
if tot_vco:
verts_co = np.empty(len(me.vertices) * 3, 'f4')
me.vertices.foreach_get("co", verts_co)
verts_co.shape = (-1, 3)
return verts_co
return None
def get_bmesh_vert_co_array(bm):
tot_vco = len(bm.verts)
if tot_vco:
return np.array([v.co for v in bm.verts], 'f4')
return None
def get_mesh_tri_verts_array(me):
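    # Return triangle vertex indices built from the mesh tessfaces, splitting each
    # quad into two triangles.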
me.calc_tessface()
len_tessfaces = len(me.tessfaces)
if len_tessfaces:
tessfaces = np.empty(len_tessfaces * 4, 'i4')
me.tessfaces.foreach_get("vertices_raw", tessfaces)
tessfaces.shape = (-1, 4)
quad_indices = tessfaces[:, 3].nonzero()[0]
tris = np.empty(((len_tessfaces + len(quad_indices)), 3), 'i4')
tris[:len_tessfaces] = tessfaces[:, :3]
tris[len_tessfaces:] = tessfaces[quad_indices][:, (0, 2, 3)]
del tessfaces
return tris
return None
def get_bmesh_tri_verts_array(bm):
ltris = bm.calc_tessface()
tris = [[ltri[0].vert.index, ltri[1].vert.index, ltri[2].vert.index] for ltri in ltris if not ltri[0].face.hide]
if tris:
return np.array(tris, 'i4')
return None
def get_mesh_edge_verts_array(me):
tot_edges = len(me.edges)
if tot_edges:
edge_verts = np.empty(tot_edges * 2, 'i4')
me.edges.foreach_get("vertices", edge_verts)
edge_verts.shape = tot_edges, 2
return edge_verts
return None
def get_bmesh_edge_verts_array(bm):
bm.edges.ensure_lookup_table()
edges = [[e.verts[0].index, e.verts[1].index] for e in bm.edges if not e.hide]
if edges:
return np.array(edges, 'i4')
return None
def get_mesh_loosevert_array(me, edges):
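    # Indices of vertices that are not used by any edge.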
verts = np.arange(len(me.vertices))
mask = np.in1d(verts, edges, invert=True)
verts = verts[mask]
if len(verts):
return verts
return None
def get_bmesh_loosevert_array(bm):
looseverts = [v.index for v in bm.verts if not (v.link_edges or v.hide)]
if looseverts:
return np.array(looseverts, 'i4')
return None
class _Mesh_Arrays():
def __init__(self, obj, create_tris, create_edges, create_looseverts):
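        # Gather numpy arrays of vertex coordinates plus tri/edge/loose-vert indices,
        # taken from the edit-mode BMesh when available, otherwise from the mesh data.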
self.tri_verts = self.edge_verts = self.looseverts = None
self.tris_co = self.edges_co = self.looseverts_co = None
if obj.type == 'MESH':
me = obj.data
if me.is_editmode:
bm = bmesh.from_edit_mesh(me)
bm.verts.ensure_lookup_table()
self.verts_co = get_bmesh_vert_co_array(bm)
if create_tris:
self.tri_verts = get_bmesh_tri_verts_array(bm)
if create_edges:
self.edge_verts = get_bmesh_edge_verts_array(bm)
if create_looseverts:
self.looseverts = get_bmesh_loosevert_array(bm)
else:
self.verts_co = get_mesh_vert_co_array(me)
if create_tris:
self.tri_verts = get_mesh_tri_verts_array(me)
if create_edges:
self.edge_verts = get_mesh_edge_verts_array(me)
if create_looseverts:
edge_verts = self.edge_verts
if edge_verts is None:
edge_verts = get_mesh_edge_verts_array(me)
self.looseverts = get_mesh_loosevert_array(me, edge_verts)
del edge_verts
else: #TODO
self.verts_co = np.zeros((1,3), 'f4')
self.looseverts = np.zeros(1, 'i4')
def __del__(self):
del self.tri_verts, self.edge_verts, self.looseverts
del self.verts_co
class GPU_Indices_Mesh():
shader = None
@classmethod
def end_opengl(cls):
del cls.shader
del cls._NULL
del cls.P
del cls.MV
del cls.MVP
del cls.vert_index
del cls.tri_co
del cls.edge_co
del cls.vert_co
del cls
@classmethod
def init_opengl(cls):
# OpenGL was already initialized, nothing to do here.
if cls.shader is not None:
return
import atexit
# Make sure we only registered the callback once.
atexit.unregister(cls.end_opengl)
atexit.register(cls.end_opengl)
cls.shader = Shader(
load_shader('3D_vert.glsl'),
None,
load_shader('primitive_id_frag.glsl'),
)
cls.unif_use_clip_planes = bgl.glGetUniformLocation(cls.shader.program, 'use_clip_planes')
cls.unif_clip_plane = bgl.glGetUniformLocation(cls.shader.program, 'clip_plane')
cls._NULL = gl_buffer_void_as_long(0)
cls.unif_MVP = bgl.glGetUniformLocation(cls.shader.program, 'MVP')
cls.unif_MV = bgl.glGetUniformLocation(cls.shader.program, 'MV')
cls.unif_offset = bgl.glGetUniformLocation(cls.shader.program, 'offset')
cls.attr_pos = bgl.glGetAttribLocation(cls.shader.program, 'pos')
cls.attr_primitive_id = bgl.glGetAttribLocation(cls.shader.program, 'primitive_id')
cls.P = bgl.Buffer(bgl.GL_FLOAT, (4, 4))
cls.MV = bgl.Buffer(bgl.GL_FLOAT, (4, 4))
cls.MVP = bgl.Buffer(bgl.GL_FLOAT, (4, 4))
# returns of public API #
cls.vert_index = bgl.Buffer(bgl.GL_INT, 1)
cls.tri_co = bgl.Buffer(bgl.GL_FLOAT, (3, 3))
cls.edge_co = bgl.Buffer(bgl.GL_FLOAT, (2, 3))
cls.vert_co = bgl.Buffer(bgl.GL_FLOAT, 3)
def __init__(self, obj, draw_tris, draw_edges, draw_verts):
GPU_Indices_Mesh.init_opengl()
self.obj = obj
self.draw_tris = draw_tris
self.draw_edges = draw_edges
self.draw_verts = draw_verts
self.vbo = None
self.vbo_tris = None
self.vbo_edges = None
self.vbo_verts = None
## Create VAO ##
self.vao = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenVertexArrays(1, self.vao)
bgl.glBindVertexArray(self.vao[0])
## Init Array ##
mesh_arrays = _Mesh_Arrays(obj, draw_tris, draw_edges, draw_verts)
## Create VBO for vertices ##
if mesh_arrays.verts_co is None:
self.draw_tris = False
self.draw_edges = False
self.draw_verts = False
return
if False: # Blender 2.8
self.vbo_len = len(mesh_arrays.verts_co)
self.vbo = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenBuffers(1, self.vbo)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo[0])
verts_co = bgl.Buffer(bgl.GL_FLOAT, mesh_arrays.verts_co.shape, mesh_arrays.verts_co)
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, self.vbo_len * 12, verts_co, bgl.GL_STATIC_DRAW)
## Create VBO for Tris ##
if mesh_arrays.tri_verts is not None:
self.tri_verts = mesh_arrays.tri_verts
self.num_tris = len(self.tri_verts)
np_tris_co = mesh_arrays.verts_co[mesh_arrays.tri_verts]
np_tris_co = bgl.Buffer(bgl.GL_FLOAT, np_tris_co.shape, np_tris_co)
self.vbo_tris = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenBuffers(1, self.vbo_tris)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_tris[0])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, self.num_tris * 36, np_tris_co, bgl.GL_STATIC_DRAW)
del np_tris_co
tri_indices = np.repeat(np.arange(self.num_tris, dtype = 'f4'), 3)
tri_indices = bgl.Buffer(bgl.GL_FLOAT, tri_indices.shape, tri_indices)
self.vbo_tri_indices = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenBuffers(1, self.vbo_tri_indices)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_tri_indices[0])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, self.num_tris * 12, tri_indices, bgl.GL_STATIC_DRAW)
del tri_indices
else:
self.num_tris = 0
self.draw_tris = False
## Create VBO for Edges ##
if mesh_arrays.edge_verts is not None:
self.edge_verts = mesh_arrays.edge_verts
self.num_edges = len(self.edge_verts)
np_edges_co = mesh_arrays.verts_co[mesh_arrays.edge_verts]
np_edges_co = bgl.Buffer(bgl.GL_FLOAT, np_edges_co.shape, np_edges_co)
self.vbo_edges = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenBuffers(1, self.vbo_edges)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_edges[0])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, self.num_edges * 24, np_edges_co, bgl.GL_STATIC_DRAW)
del np_edges_co
edge_indices = np.repeat(np.arange(self.num_edges, dtype = 'f4'), 2)
edge_indices = bgl.Buffer(bgl.GL_FLOAT, edge_indices.shape, edge_indices)
self.vbo_edge_indices = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenBuffers(1, self.vbo_edge_indices)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_edge_indices[0])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, self.num_edges * 8, edge_indices, bgl.GL_STATIC_DRAW)
del edge_indices
else:
self.num_edges = 0
self.draw_edges = False
## Create EBO for Loose Verts ##
if mesh_arrays.looseverts is not None:
self.looseverts = mesh_arrays.looseverts
self.num_verts = len(mesh_arrays.looseverts)
np_lverts_co = mesh_arrays.verts_co[mesh_arrays.looseverts]
np_lverts_co = bgl.Buffer(bgl.GL_FLOAT, np_lverts_co.shape, np_lverts_co)
self.vbo_verts = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenBuffers(1, self.vbo_verts)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_verts[0])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, self.num_verts * 12, np_lverts_co, bgl.GL_STATIC_DRAW)
del np_lverts_co
looseverts_indices = np.arange(self.num_verts, dtype = 'f4')
looseverts_indices = bgl.Buffer(bgl.GL_FLOAT, looseverts_indices.shape, looseverts_indices)
self.vbo_looseverts_indices = bgl.Buffer(bgl.GL_INT, 1)
bgl.glGenBuffers(1, self.vbo_looseverts_indices)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_looseverts_indices[0])
bgl.glBufferData(bgl.GL_ARRAY_BUFFER, self.num_verts * 4, looseverts_indices, bgl.GL_STATIC_DRAW)
del looseverts_indices
else:
self.num_verts = 0
self.draw_verts = False
del mesh_arrays
bgl.glBindVertexArray(0)
def get_tot_elems(self):
tot = 0
if self.draw_tris:
tot += self.num_tris
if self.draw_edges:
tot += self.num_edges
if self.draw_verts:
tot += self.num_verts
return tot
def set_draw_mode(self, draw_tris, draw_edges, draw_verts):
self.draw_tris = draw_tris and self.vbo_tris
self.draw_edges = draw_edges and self.vbo_edges
self.draw_verts = draw_verts and self.vbo_verts
def set_ModelViewMatrix(self, MV):
self.MV[:] = MV[:]
self.MVP[:] = Matrix(self.P) * MV
def Draw(self, index_offset):
self.first_index = index_offset
bgl.glUseProgram(self.shader.program)
bgl.glBindVertexArray(self.vao[0])
bgl.glUniformMatrix4fv(self.unif_MV, 1, bgl.GL_TRUE, self.MV)
bgl.glUniformMatrix4fv(self.unif_MVP, 1, bgl.GL_TRUE, self.MVP)
if self.draw_tris:
bgl.glUniform1f(self.unif_offset, float(index_offset)) # bgl has no glUniform1ui :\
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_tris[0])
bgl.glEnableVertexAttribArray(self.attr_pos)
bgl.glVertexAttribPointer(self.attr_pos, 3, bgl.GL_FLOAT, bgl.GL_FALSE, 0, self._NULL)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_tri_indices[0])
bgl.glEnableVertexAttribArray(self.attr_primitive_id)
bgl.glVertexAttribPointer(self.attr_primitive_id, 1, bgl.GL_FLOAT, bgl.GL_FALSE, 0, self._NULL)
bgl.glDrawArrays(bgl.GL_TRIANGLES, 0, self.num_tris * 3)
index_offset += self.num_tris
bgl.glDepthRange(-0.00005, 0.99995)
if self.draw_edges:
bgl.glUniform1f(self.unif_offset, float(index_offset)) #TODO: use glUniform1ui
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_edges[0])
bgl.glVertexAttribPointer(self.attr_pos, 3, bgl.GL_FLOAT, bgl.GL_FALSE, 0, self._NULL)
bgl.glEnableVertexAttribArray(self.attr_pos)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_edge_indices[0])
bgl.glVertexAttribPointer(self.attr_primitive_id, 1, bgl.GL_FLOAT, bgl.GL_FALSE, 0, self._NULL)
bgl.glEnableVertexAttribArray(self.attr_primitive_id)
bgl.glDrawArrays(bgl.GL_LINES, 0, self.num_edges * 2)
index_offset += self.num_edges
if self.draw_verts:
bgl.glUniform1f(self.unif_offset, float(index_offset)) #TODO: use glUniform1ui
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_verts[0])
bgl.glVertexAttribPointer(self.attr_pos, 3, bgl.GL_FLOAT, bgl.GL_FALSE, 0, self._NULL)
bgl.glEnableVertexAttribArray(self.attr_pos)
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_looseverts_indices[0])
bgl.glVertexAttribPointer(self.attr_primitive_id, 1, bgl.GL_FLOAT, bgl.GL_FALSE, 0, self._NULL)
bgl.glEnableVertexAttribArray(self.attr_primitive_id)
bgl.glDrawArrays(bgl.GL_POINTS, 0, self.num_verts)
bgl.glDepthRange(0.0, 1.0)
def get_tri_co(self, index):
bgl.glBindVertexArray(self.vao[0])
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_tris[0])
bgl.glGetBufferSubData(bgl.GL_ARRAY_BUFFER, index * 36, 36, self.tri_co)
bgl.glBindVertexArray(0)
return self.tri_co
def get_edge_co(self, index):
bgl.glBindVertexArray(self.vao[0])
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_edges[0])
bgl.glGetBufferSubData(bgl.GL_ARRAY_BUFFER, index * 24, 24, self.edge_co)
bgl.glBindVertexArray(0)
return self.edge_co
def get_loosevert_co(self, index):
bgl.glBindVertexArray(self.vao[0])
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, self.vbo_verts[0])
bgl.glGetBufferSubData(bgl.GL_ARRAY_BUFFER, index * 12, 12, self.vert_co)
bgl.glBindVertexArray(0)
return self.vert_co
def get_tri_verts(self, index):
return self.tri_verts[index]
def get_edge_verts(self, index):
return self.edge_verts[index]
def get_loosevert_index(self, index):
return self.looseverts[index]
def __del__(self):
if self.vbo_tris:
bgl.glDeleteBuffers(1, self.vbo_tris)
bgl.glDeleteBuffers(1, self.vbo_tri_indices)
del self.tri_verts
if self.vbo_edges:
bgl.glDeleteBuffers(1, self.vbo_edges)
bgl.glDeleteBuffers(1, self.vbo_edge_indices)
del self.edge_verts
if self.vbo_verts:
bgl.glDeleteBuffers(1, self.vbo_verts)
bgl.glDeleteBuffers(1, self.vbo_looseverts_indices)
del self.looseverts
bgl.glDeleteVertexArrays(1, self.vao)
#print('mesh_del', self.obj.name)
class PreviousGLState:
buf = bgl.Buffer(bgl.GL_INT, (4, 1))
cur_program = buf[0]
cur_vao = buf[1]
cur_vbo = buf[2]
cur_ebo = buf[3]
def _store_current_shader_state(cls):
bgl.glGetIntegerv(bgl.GL_CURRENT_PROGRAM, cls.cur_program)
bgl.glGetIntegerv(bgl.GL_VERTEX_ARRAY_BINDING, cls.cur_vao)
bgl.glGetIntegerv(bgl.GL_ARRAY_BUFFER_BINDING, cls.cur_vbo)
bgl.glGetIntegerv(bgl.GL_ELEMENT_ARRAY_BUFFER_BINDING, cls.cur_ebo)
def _restore_shader_state(cls):
bgl.glUseProgram(cls.cur_program[0])
bgl.glBindVertexArray(cls.cur_vao[0])
bgl.glBindBuffer(bgl.GL_ARRAY_BUFFER, cls.cur_vbo[0])
bgl.glBindBuffer(bgl.GL_ELEMENT_ARRAY_BUFFER, cls.cur_ebo[0])
def gpu_Indices_enable_state():
_store_current_shader_state(PreviousGLState)
GPU_Indices_Mesh.init_opengl()
bgl.glUseProgram(GPU_Indices_Mesh.shader.program)
#bgl.glBindVertexArray(GPU_Indices_Mesh.vao[0])
def gpu_Indices_restore_state():
bgl.glBindVertexArray(0)
_restore_shader_state(PreviousGLState)
def gpu_Indices_use_clip_planes(rv3d, value):
if rv3d.use_clip_planes:
planes = bgl.Buffer(bgl.GL_FLOAT, (6, 4), rv3d.clip_planes)
_store_current_shader_state(PreviousGLState)
GPU_Indices_Mesh.init_opengl()
bgl.glUseProgram(GPU_Indices_Mesh.shader.program)
bgl.glUniform1i(GPU_Indices_Mesh.unif_use_clip_planes, value)
bgl.glUniform4fv(GPU_Indices_Mesh.unif_clip_plane, 4, planes)
_restore_shader_state(PreviousGLState)
def gpu_Indices_set_ProjectionMatrix(P):
GPU_Indices_Mesh.P[:] = P
```
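The state-capture helpers above are meant to bracket any custom index drawing so the viewport's own program, VAO, and buffer bindings survive. Below is a hedged usage sketch, not part of the add-on: `snap_objects` and its `.data` attribute holding a `GPU_Indices_Mesh` instance are assumptions made for illustration, while every function called is defined in the file above.
```python
# Hypothetical caller: wrap custom index drawing in the save/restore helpers.
def draw_snap_indices(snap_objects, proj_mat):
    gpu_Indices_enable_state()                  # store program/VAO/VBO bindings
    gpu_Indices_set_ProjectionMatrix(proj_mat)
    index_offset = 1                            # 0 is reserved for "no element"
    for snap_obj in snap_objects:               # snap_obj.data is assumed to be
        gpu_data = snap_obj.data                # a GPU_Indices_Mesh instance
        gpu_data.set_draw_mode(True, True, True)
        gpu_data.Draw(index_offset)
        index_offset += gpu_data.get_tot_elems()
    gpu_Indices_restore_state()                 # hand the GL state back to Blender
```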
#### File: blender-addons-master/object_print3d_utils/operators.py
```python
import array
import bpy
from bpy.types import Operator
from bpy.props import (
IntProperty,
FloatProperty,
)
import bmesh
from . import (
mesh_helpers,
report,
)
def clean_float(text):
# strip trailing zeros: 0.000 -> 0.0
index = text.rfind(".")
if index != -1:
index += 2
head, tail = text[:index], text[index:]
tail = tail.rstrip("0")
text = head + tail
return text
# ---------
# Mesh Info
class MESH_OT_Print3D_Info_Volume(Operator):
"""Report the volume of the active mesh"""
bl_idname = "mesh.print3d_info_volume"
bl_label = "Print3D Info Volume"
def execute(self, context):
scene = context.scene
unit = scene.unit_settings
scale = 1.0 if unit.system == 'NONE' else unit.scale_length
obj = context.active_object
bm = mesh_helpers.bmesh_copy_from_object(obj, apply_modifiers=True)
volume = bm.calc_volume()
bm.free()
info = []
if unit.system == 'METRIC':
info.append(("Volume: %s cm³" % clean_float("%.4f" % ((volume * (scale ** 3.0)) / (0.01 ** 3.0))), None))
elif unit.system == 'IMPERIAL':
info.append(("Volume: %s \"³" % clean_float("%.4f" % ((volume * (scale ** 3.0)) / (0.0254 ** 3.0))), None))
else:
info.append(("Volume: %s³" % clean_float("%.8f" % volume), None))
report.update(*info)
return {'FINISHED'}
class MESH_OT_Print3D_Info_Area(Operator):
"""Report the surface area of the active mesh"""
bl_idname = "mesh.print3d_info_area"
bl_label = "Print3D Info Area"
def execute(self, context):
scene = context.scene
unit = scene.unit_settings
scale = 1.0 if unit.system == 'NONE' else unit.scale_length
obj = context.active_object
bm = mesh_helpers.bmesh_copy_from_object(obj, apply_modifiers=True)
area = mesh_helpers.bmesh_calc_area(bm)
bm.free()
info = []
if unit.system == 'METRIC':
info.append(("Area: %s cm²" % clean_float("%.4f" % ((area * (scale ** 2.0)) / (0.01 ** 2.0))), None))
elif unit.system == 'IMPERIAL':
info.append(("Area: %s \"²" % clean_float("%.4f" % ((area * (scale ** 2.0)) / (0.0254 ** 2.0))), None))
else:
info.append(("Area: %s²" % clean_float("%.8f" % area), None))
report.update(*info)
return {'FINISHED'}
# ---------------
# Geometry Checks
def execute_check(self, context):
obj = context.active_object
info = []
self.main_check(obj, info)
report.update(*info)
multiple_obj_warning(self, context)
return {'FINISHED'}
def multiple_obj_warning(self, context):
if len(context.selected_objects) > 1:
self.report({"INFO"}, "Multiple selected objects. Only the active one will be evaluated")
class MESH_OT_Print3D_Check_Solid(Operator):
"""Check for geometry is solid (has valid inside/outside) and correct normals"""
bl_idname = "mesh.print3d_check_solid"
bl_label = "Print3D Check Solid"
@staticmethod
def main_check(obj, info):
bm = mesh_helpers.bmesh_copy_from_object(obj, transform=False, triangulate=False)
edges_non_manifold = array.array('i', (i for i, ele in enumerate(bm.edges)
if not ele.is_manifold))
edges_non_contig = array.array('i', (i for i, ele in enumerate(bm.edges)
if ele.is_manifold and (not ele.is_contiguous)))
info.append(("Non Manifold Edge: %d" % len(edges_non_manifold),
(bmesh.types.BMEdge, edges_non_manifold)))
info.append(("Bad Contig. Edges: %d" % len(edges_non_contig),
(bmesh.types.BMEdge, edges_non_contig)))
bm.free()
def execute(self, context):
return execute_check(self, context)
class MESH_OT_Print3D_Check_Intersections(Operator):
"""Check geometry for self intersections"""
bl_idname = "mesh.print3d_check_intersect"
bl_label = "Print3D Check Intersections"
@staticmethod
def main_check(obj, info):
faces_intersect = mesh_helpers.bmesh_check_self_intersect_object(obj)
info.append(("Intersect Face: %d" % len(faces_intersect),
(bmesh.types.BMFace, faces_intersect)))
def execute(self, context):
return execute_check(self, context)
class MESH_OT_Print3D_Check_Degenerate(Operator):
"""Check for degenerate geometry that may not print properly """ \
"""(zero area faces, zero length edges)"""
bl_idname = "mesh.print3d_check_degenerate"
bl_label = "Print3D Check Degenerate"
@staticmethod
def main_check(obj, info):
scene = bpy.context.scene
print_3d = scene.print_3d
threshold = print_3d.threshold_zero
bm = mesh_helpers.bmesh_copy_from_object(obj, transform=False, triangulate=False)
faces_zero = array.array('i', (i for i, ele in enumerate(bm.faces) if ele.calc_area() <= threshold))
edges_zero = array.array('i', (i for i, ele in enumerate(bm.edges) if ele.calc_length() <= threshold))
info.append(("Zero Faces: %d" % len(faces_zero),
(bmesh.types.BMFace, faces_zero)))
info.append(("Zero Edges: %d" % len(edges_zero),
(bmesh.types.BMEdge, edges_zero)))
bm.free()
def execute(self, context):
return execute_check(self, context)
class MESH_OT_Print3D_Check_Distorted(Operator):
"""Check for non-flat faces """
bl_idname = "mesh.print3d_check_distort"
bl_label = "Print3D Check Distorted Faces"
@staticmethod
def main_check(obj, info):
scene = bpy.context.scene
print_3d = scene.print_3d
angle_distort = print_3d.angle_distort
bm = mesh_helpers.bmesh_copy_from_object(obj, transform=True, triangulate=False)
bm.normal_update()
faces_distort = array.array(
'i',
(i for i, ele in enumerate(bm.faces) if mesh_helpers.face_is_distorted(ele, angle_distort))
)
info.append(("Non-Flat Faces: %d" % len(faces_distort),
(bmesh.types.BMFace, faces_distort)))
bm.free()
def execute(self, context):
return execute_check(self, context)
class MESH_OT_Print3D_Check_Thick(Operator):
"""Check geometry is above the minimum thickness preference """ \
"""(relies on correct normals)"""
bl_idname = "mesh.print3d_check_thick"
bl_label = "Print3D Check Thickness"
@staticmethod
def main_check(obj, info):
scene = bpy.context.scene
print_3d = scene.print_3d
faces_error = mesh_helpers.bmesh_check_thick_object(obj, print_3d.thickness_min)
info.append(("Thin Faces: %d" % len(faces_error),
(bmesh.types.BMFace, faces_error)))
def execute(self, context):
return execute_check(self, context)
class MESH_OT_Print3D_Check_Sharp(Operator):
"""Check edges are below the sharpness preference"""
bl_idname = "mesh.print3d_check_sharp"
bl_label = "Print3D Check Sharp"
@staticmethod
def main_check(obj, info):
scene = bpy.context.scene
print_3d = scene.print_3d
angle_sharp = print_3d.angle_sharp
bm = mesh_helpers.bmesh_copy_from_object(obj, transform=True, triangulate=False)
bm.normal_update()
edges_sharp = [ele.index for ele in bm.edges
if ele.is_manifold and ele.calc_face_angle_signed() > angle_sharp]
info.append(("Sharp Edge: %d" % len(edges_sharp),
(bmesh.types.BMEdge, edges_sharp)))
bm.free()
def execute(self, context):
return execute_check(self, context)
class MESH_OT_Print3D_Check_Overhang(Operator):
"""Check faces don't overhang past a certain angle"""
bl_idname = "mesh.print3d_check_overhang"
bl_label = "Print3D Check Overhang"
@staticmethod
def main_check(obj, info):
import math
from mathutils import Vector
scene = bpy.context.scene
print_3d = scene.print_3d
angle_overhang = (math.pi / 2.0) - print_3d.angle_overhang
if angle_overhang == math.pi:
info.append(("Skipping Overhang", ()))
return
bm = mesh_helpers.bmesh_copy_from_object(obj, transform=True, triangulate=False)
bm.normal_update()
z_down = Vector((0, 0, -1.0))
z_down_angle = z_down.angle
# 4.0 ignores zero area faces
faces_overhang = [ele.index for ele in bm.faces
if z_down_angle(ele.normal, 4.0) < angle_overhang]
info.append(("Overhang Face: %d" % len(faces_overhang),
(bmesh.types.BMFace, faces_overhang)))
bm.free()
def execute(self, context):
return execute_check(self, context)
class MESH_OT_Print3D_Check_All(Operator):
"""Run all checks"""
bl_idname = "mesh.print3d_check_all"
bl_label = "Print3D Check All"
check_cls = (
MESH_OT_Print3D_Check_Solid,
MESH_OT_Print3D_Check_Intersections,
MESH_OT_Print3D_Check_Degenerate,
MESH_OT_Print3D_Check_Distorted,
MESH_OT_Print3D_Check_Thick,
MESH_OT_Print3D_Check_Sharp,
MESH_OT_Print3D_Check_Overhang,
)
def execute(self, context):
obj = context.active_object
info = []
for cls in self.check_cls:
cls.main_check(obj, info)
report.update(*info)
multiple_obj_warning(self, context)
return {'FINISHED'}
class MESH_OT_Print3D_Clean_Isolated(Operator):
"""Cleanup isolated vertices and edges"""
bl_idname = "mesh.print3d_clean_isolated"
bl_label = "Print3D Clean Isolated "
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
obj = context.active_object
bm = mesh_helpers.bmesh_from_object(obj)
info = []
change = False
def face_is_isolated(ele):
for loop in ele.loops:
loop_next = loop.link_loop_radial_next
if loop is not loop_next:
return False
return True
def edge_is_isolated(ele):
return ele.is_wire
def vert_is_isolated(ele):
return (not bool(ele.link_edges))
# --- face
elems_remove = [ele for ele in bm.faces if face_is_isolated(ele)]
remove = bm.faces.remove
for ele in elems_remove:
remove(ele)
change |= bool(elems_remove)
info.append(("Faces Removed: %d" % len(elems_remove),
None))
del elems_remove
# --- edge
elems_remove = [ele for ele in bm.edges if edge_is_isolated(ele)]
remove = bm.edges.remove
for ele in elems_remove:
remove(ele)
change |= bool(elems_remove)
info.append(("Edge Removed: %d" % len(elems_remove),
None))
del elems_remove
# --- vert
elems_remove = [ele for ele in bm.verts if vert_is_isolated(ele)]
remove = bm.verts.remove
for ele in elems_remove:
remove(ele)
change |= bool(elems_remove)
info.append(("Verts Removed: %d" % len(elems_remove),
None))
del elems_remove
# ---
report.update(*info)
if change:
mesh_helpers.bmesh_to_object(obj, bm)
return {'FINISHED'}
else:
return {'CANCELLED'}
class MESH_OT_Print3D_Clean_Distorted(Operator):
"""Tessellate distorted faces"""
bl_idname = "mesh.print3d_clean_distorted"
bl_label = "Print3D Clean Distorted"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
scene = bpy.context.scene
print_3d = scene.print_3d
angle_distort = print_3d.angle_distort
obj = context.active_object
bm = mesh_helpers.bmesh_from_object(obj)
bm.normal_update()
elems_triangulate = [ele for ele in bm.faces if mesh_helpers.face_is_distorted(ele, angle_distort)]
if elems_triangulate:
bmesh.ops.triangulate(bm, faces=elems_triangulate)
mesh_helpers.bmesh_to_object(obj, bm)
return {'FINISHED'}
else:
return {'CANCELLED'}
class MESH_OT_Print3D_Clean_Non_Manifold(Operator):
"""Cleanup problems, like holes, non-manifold vertices, and inverted normals"""
bl_idname = "mesh.print3d_clean_non_manifold"
bl_label = "Print3D Clean Non-Manifold and Inverted"
bl_options = {'REGISTER', 'UNDO'}
threshold = bpy.props.FloatProperty(
name="threshold",
description="Minimum distance between elements to merge",
default=0.0001,
)
sides = bpy.props.IntProperty(
name="sides",
description="Number of sides in hole required to fill",
default=4,
)
def execute(self, context):
self.context = context
mode_orig = context.mode
self.setup_environment()
bm_key_orig = self.elem_count(context)
self.delete_loose()
self.remove_doubles(self.threshold)
self.dissolve_degenerate(self.threshold)
# may take a while
self.fix_non_manifold(context, self.sides)
self.make_normals_consistently_outwards()
bm_key = self.elem_count(context)
if mode_orig != 'EDIT_MESH':
bpy.ops.object.mode_set(mode='OBJECT')
self.report(
{'INFO'},
"Modified Verts:%+d, Edges:%+d, Faces:%+d" %
(bm_key[0] - bm_key_orig[0],
bm_key[1] - bm_key_orig[1],
bm_key[2] - bm_key_orig[2],
))
return {'FINISHED'}
@staticmethod
def elem_count(context):
bm = bmesh.from_edit_mesh(context.edit_object.data)
return len(bm.verts), len(bm.edges), len(bm.faces)
@staticmethod
def setup_environment():
"""set the mode as edit, select mode as vertices, and reveal hidden vertices"""
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_mode(type='VERT')
bpy.ops.mesh.reveal()
@staticmethod
def remove_doubles(threshold):
"""remove duplicate vertices"""
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.remove_doubles(threshold=threshold)
@staticmethod
def delete_loose():
"""delete loose vertices/edges/faces"""
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.delete_loose()
@staticmethod
def dissolve_degenerate(threshold):
"""dissolve zero area faces and zero length edges"""
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.dissolve_degenerate(threshold=threshold)
@staticmethod
def make_normals_consistently_outwards():
"""have all normals face outwards"""
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent()
@classmethod
def fix_non_manifold(cls, context, sides):
"""naive iterate-until-no-more approach for fixing manifolds"""
total_non_manifold = cls.count_non_manifold_verts(context)
if not total_non_manifold:
return
bm_states = set()
bm_key = cls.elem_count(context)
bm_states.add(bm_key)
while True:
cls.fill_non_manifold(sides)
cls.delete_newly_generated_non_manifold_verts()
bm_key = cls.elem_count(context)
if bm_key in bm_states:
break
else:
bm_states.add(bm_key)
@staticmethod
def select_non_manifold_verts(
use_wire=False,
use_boundary=False,
use_multi_face=False,
use_non_contiguous=False,
use_verts=False,
):
"""select non-manifold vertices"""
bpy.ops.mesh.select_non_manifold(
extend=False,
use_wire=use_wire,
use_boundary=use_boundary,
use_multi_face=use_multi_face,
use_non_contiguous=use_non_contiguous,
use_verts=use_verts,
)
@classmethod
def count_non_manifold_verts(cls, context):
"""return a set of coordinates of non-manifold vertices"""
cls.select_non_manifold_verts(
use_wire=True,
use_boundary=True,
use_verts=True,
)
bm = bmesh.from_edit_mesh(context.edit_object.data)
return sum((1 for v in bm.verts if v.select))
@classmethod
def fill_non_manifold(cls, sides):
"""fill holes and then fill in any remnant non-manifolds"""
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.fill_holes(sides=sides)
# fill selected edge faces, which could be additional holes
cls.select_non_manifold_verts(use_boundary=True)
bpy.ops.mesh.fill()
@classmethod
def delete_newly_generated_non_manifold_verts(cls):
"""delete any newly generated vertices from the filling repair"""
cls.select_non_manifold_verts(use_wire=True, use_verts=True)
bpy.ops.mesh.delete(type='VERT')
class MESH_OT_Print3D_Clean_Thin(Operator):
"""Ensure minimum thickness"""
bl_idname = "mesh.print3d_clean_thin"
bl_label = "Print3D Clean Thin"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
# TODO
return {'FINISHED'}
# -------------
# Select Report
# ... helper function for info UI
class MESH_OT_Print3D_Select_Report(Operator):
"""Select the data associated with this report"""
bl_idname = "mesh.print3d_select_report"
bl_label = "Print3D Select Report"
bl_options = {'INTERNAL'}
index = IntProperty()
_type_to_mode = {
bmesh.types.BMVert: 'VERT',
bmesh.types.BMEdge: 'EDGE',
bmesh.types.BMFace: 'FACE',
}
_type_to_attr = {
bmesh.types.BMVert: "verts",
bmesh.types.BMEdge: "edges",
bmesh.types.BMFace: "faces",
}
def execute(self, context):
obj = context.edit_object
info = report.info()
text, data = info[self.index]
bm_type, bm_array = data
bpy.ops.mesh.reveal()
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_mode(type=self._type_to_mode[bm_type])
bm = bmesh.from_edit_mesh(obj.data)
elems = getattr(bm, MESH_OT_Print3D_Select_Report._type_to_attr[bm_type])[:]
try:
for i in bm_array:
elems[i].select_set(True)
except:
# possible arrays are out of sync
self.report({'WARNING'}, "Report is out of date, re-run check")
# cool, but in fact annoying
# bpy.ops.view3d.view_selected(use_all_regions=False)
return {'FINISHED'}
# -----------
# Scale to...
def _scale(scale, report=None, report_suffix=""):
if scale != 1.0:
bpy.ops.transform.resize(value=(scale,) * 3,
mirror=False, proportional='DISABLED',
snap=False,
texture_space=False)
if report is not None:
report({'INFO'}, "Scaled by %s%s" % (clean_float("%.6f" % scale), report_suffix))
class MESH_OT_Print3D_Scale_To_Volume(Operator):
"""Scale edit-mesh or selected-objects to a set volume"""
bl_idname = "mesh.print3d_scale_to_volume"
bl_label = "Scale to Volume"
bl_options = {'REGISTER', 'UNDO'}
volume_init = FloatProperty(
options={'HIDDEN'},
)
volume = FloatProperty(
name="Volume",
unit='VOLUME',
min=0.0, max=100000.0,
)
def execute(self, context):
import math
scale = math.pow(self.volume, 1 / 3) / math.pow(self.volume_init, 1 / 3)
self.report({'INFO'}, "Scaled by %s" % clean_float("%.6f" % scale))
_scale(scale, self.report)
return {'FINISHED'}
def invoke(self, context, event):
def calc_volume(obj):
bm = mesh_helpers.bmesh_copy_from_object(obj, apply_modifiers=True)
volume = bm.calc_volume(signed=True)
bm.free()
return volume
if context.mode == 'EDIT_MESH':
volume = calc_volume(context.edit_object)
else:
volume = sum(calc_volume(obj) for obj in context.selected_editable_objects
if obj.type == 'MESH')
if volume == 0.0:
self.report({'WARNING'}, "Object has zero volume")
return {'CANCELLED'}
self.volume_init = self.volume = abs(volume)
wm = context.window_manager
return wm.invoke_props_dialog(self)
class MESH_OT_Print3D_Scale_To_Bounds(Operator):
"""Scale edit-mesh or selected-objects to fit within a maximum length"""
bl_idname = "mesh.print3d_scale_to_bounds"
bl_label = "Scale to Bounds"
bl_options = {'REGISTER', 'UNDO'}
length_init = FloatProperty(
options={'HIDDEN'},
)
axis_init = IntProperty(
options={'HIDDEN'},
)
length = FloatProperty(
name="Length Limit",
unit='LENGTH',
min=0.0, max=100000.0,
)
def execute(self, context):
scale = self.length / self.length_init
_scale(scale,
report=self.report,
report_suffix=", Clamping %s-Axis" % "XYZ"[self.axis_init])
return {'FINISHED'}
def invoke(self, context, event):
from mathutils import Vector
def calc_length(vecs):
return max(((max(v[i] for v in vecs) - min(v[i] for v in vecs)), i) for i in range(3))
if context.mode == 'EDIT_MESH':
length, axis = calc_length([Vector(v) * obj.matrix_world
for obj in [context.edit_object]
for v in obj.bound_box])
else:
length, axis = calc_length([Vector(v) * obj.matrix_world
for obj in context.selected_editable_objects
if obj.type == 'MESH' for v in obj.bound_box])
if length == 0.0:
self.report({'WARNING'}, "Object has zero bounds")
return {'CANCELLED'}
self.length_init = self.length = length
self.axis_init = axis
wm = context.window_manager
return wm.invoke_props_dialog(self)
# ------
# Export
class MESH_OT_Print3D_Export(Operator):
"""Export active object using print3d settings"""
bl_idname = "mesh.print3d_export"
bl_label = "Print3D Export"
def execute(self, context):
from . import export
info = []
ret = export.write_mesh(context, info, self.report)
report.update(*info)
if ret:
return {'FINISHED'}
else:
return {'CANCELLED'}
```
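Every geometry check above follows one pattern: a `main_check(obj, info)` staticmethod that appends `(label, (bmesh_type, indices))` tuples, plus an `execute` that defers to `execute_check`. As a hedged illustration only (this operator is not part of the add-on), a new check slots in like so, reusing nothing beyond the imports and helpers already present in this file:
```python
# Hypothetical extra check following the pattern used above.
class MESH_OT_Print3D_Check_Ngons(Operator):
    """Check for faces with more than four sides"""
    bl_idname = "mesh.print3d_check_ngons"
    bl_label = "Print3D Check Ngons"

    @staticmethod
    def main_check(obj, info):
        bm = mesh_helpers.bmesh_copy_from_object(obj, transform=False, triangulate=False)
        faces_ngon = array.array('i', (i for i, ele in enumerate(bm.faces)
                                       if len(ele.verts) > 4))
        info.append(("Ngon Faces: %d" % len(faces_ngon),
                     (bmesh.types.BMFace, faces_ngon)))
        bm.free()

    def execute(self, context):
        return execute_check(self, context)
```
To appear next to the built-in checks, such a class would also need to be registered and, optionally, added to `MESH_OT_Print3D_Check_All.check_cls`.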
#### File: uv_magic_uv/op/transfer_uv.py
```python
__author__ = "Nutti <<EMAIL>>, Mifth, MaxRobinot"
__status__ = "production"
__version__ = "5.1"
__date__ = "24 Feb 2018"
from collections import OrderedDict
import bpy
import bmesh
from bpy.props import BoolProperty
from .. import common
class MUV_TransUVCopy(bpy.types.Operator):
"""
Operation class: Transfer UV copy
Topological based copy
"""
bl_idname = "uv.muv_transuv_copy"
bl_label = "Transfer UV Copy"
bl_description = "Transfer UV Copy (Topological based copy)"
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
props = context.scene.muv_props.transuv
active_obj = context.scene.objects.active
bm = bmesh.from_edit_mesh(active_obj.data)
if common.check_version(2, 73, 0) >= 0:
bm.faces.ensure_lookup_table()
# get UV layer
if not bm.loops.layers.uv:
self.report({'WARNING'}, "Object must have more than one UV map")
return {'CANCELLED'}
uv_layer = bm.loops.layers.uv.verify()
props.topology_copied.clear()
# get selected faces
active_face = bm.faces.active
sel_faces = [face for face in bm.faces if face.select]
if len(sel_faces) != 2:
self.report({'WARNING'}, "Two faces must be selected")
return {'CANCELLED'}
if not active_face or active_face not in sel_faces:
self.report({'WARNING'}, "Two faces must be active")
return {'CANCELLED'}
# parse all faces according to selection
active_face_nor = active_face.normal.copy()
all_sorted_faces = main_parse(
self, uv_layer, sel_faces, active_face,
active_face_nor)
if all_sorted_faces:
for face_data in all_sorted_faces.values():
edges = face_data[1]
uv_loops = face_data[2]
uvs = [l.uv.copy() for l in uv_loops]
pin_uvs = [l.pin_uv for l in uv_loops]
seams = [e.seam for e in edges]
props.topology_copied.append([uvs, pin_uvs, seams])
bmesh.update_edit_mesh(active_obj.data)
return {'FINISHED'}
class MUV_TransUVPaste(bpy.types.Operator):
"""
Operation class: Transfer UV paste
Topological based paste
"""
bl_idname = "uv.muv_transuv_paste"
bl_label = "Transfer UV Paste"
bl_description = "Transfer UV Paste (Topological based paste)"
bl_options = {'REGISTER', 'UNDO'}
invert_normals = BoolProperty(
name="Invert Normals",
description="Invert Normals",
default=False
)
copy_seams = BoolProperty(
name="Copy Seams",
description="Copy Seams",
default=True
)
def execute(self, context):
props = context.scene.muv_props.transuv
active_obj = context.scene.objects.active
bm = bmesh.from_edit_mesh(active_obj.data)
if common.check_version(2, 73, 0) >= 0:
bm.faces.ensure_lookup_table()
# get UV layer
if not bm.loops.layers.uv:
self.report({'WARNING'}, "Object must have more than one UV map")
return {'CANCELLED'}
uv_layer = bm.loops.layers.uv.verify()
# get selection history
all_sel_faces = [
e for e in bm.select_history
if isinstance(e, bmesh.types.BMFace) and e.select]
if len(all_sel_faces) % 2 != 0:
self.report({'WARNING'}, "Two faces must be selected")
return {'CANCELLED'}
# parse selection history
for i, _ in enumerate(all_sel_faces):
if (i == 0) or (i % 2 == 0):
continue
sel_faces = [all_sel_faces[i - 1], all_sel_faces[i]]
active_face = all_sel_faces[i]
# parse all faces according to selection history
active_face_nor = active_face.normal.copy()
if self.invert_normals:
active_face_nor.negate()
all_sorted_faces = main_parse(
self, uv_layer, sel_faces, active_face,
active_face_nor)
if all_sorted_faces:
# check amount of copied/pasted faces
if len(all_sorted_faces) != len(props.topology_copied):
self.report(
{'WARNING'},
"Mesh has different amount of faces"
)
return {'FINISHED'}
for j, face_data in enumerate(all_sorted_faces.values()):
copied_data = props.topology_copied[j]
# check amount of copied/pasted verts
if len(copied_data[0]) != len(face_data[2]):
bpy.ops.mesh.select_all(action='DESELECT')
# select problematic face
list(all_sorted_faces.keys())[j].select = True
self.report(
{'WARNING'},
"Face have different amount of vertices"
)
return {'FINISHED'}
for k, (edge, uvloop) in enumerate(zip(face_data[1],
face_data[2])):
uvloop.uv = copied_data[0][k]
uvloop.pin_uv = copied_data[1][k]
if self.copy_seams:
edge.seam = copied_data[2][k]
bmesh.update_edit_mesh(active_obj.data)
if self.copy_seams:
active_obj.data.show_edge_seams = True
return {'FINISHED'}
def main_parse(
self, uv_layer, sel_faces,
active_face, active_face_nor):
all_sorted_faces = OrderedDict() # This is the main stuff
used_verts = set()
used_edges = set()
faces_to_parse = []
# get shared edge of two faces
cross_edges = []
for edge in active_face.edges:
if edge in sel_faces[0].edges and edge in sel_faces[1].edges:
cross_edges.append(edge)
# parse two selected faces
if cross_edges and len(cross_edges) == 1:
shared_edge = cross_edges[0]
vert1 = None
vert2 = None
dot_n = active_face_nor.normalized()
edge_vec_1 = (shared_edge.verts[1].co - shared_edge.verts[0].co)
edge_vec_len = edge_vec_1.length
edge_vec_1 = edge_vec_1.normalized()
af_center = active_face.calc_center_median()
af_vec = shared_edge.verts[0].co + (edge_vec_1 * (edge_vec_len * 0.5))
af_vec = (af_vec - af_center).normalized()
if af_vec.cross(edge_vec_1).dot(dot_n) > 0:
vert1 = shared_edge.verts[0]
vert2 = shared_edge.verts[1]
else:
vert1 = shared_edge.verts[1]
vert2 = shared_edge.verts[0]
# get active face stuff and uvs
face_stuff = get_other_verts_edges(
active_face, vert1, vert2, shared_edge, uv_layer)
all_sorted_faces[active_face] = face_stuff
used_verts.update(active_face.verts)
used_edges.update(active_face.edges)
# get first selected face stuff and uvs as they share shared_edge
second_face = sel_faces[0]
if second_face is active_face:
second_face = sel_faces[1]
face_stuff = get_other_verts_edges(
second_face, vert1, vert2, shared_edge, uv_layer)
all_sorted_faces[second_face] = face_stuff
used_verts.update(second_face.verts)
used_edges.update(second_face.edges)
# first Grow
faces_to_parse.append(active_face)
faces_to_parse.append(second_face)
else:
self.report({'WARNING'}, "Two faces should share one edge")
return None
# parse all faces
while True:
new_parsed_faces = []
if not faces_to_parse:
break
for face in faces_to_parse:
face_stuff = all_sorted_faces.get(face)
new_faces = parse_faces(
face, face_stuff, used_verts, used_edges, all_sorted_faces,
uv_layer)
if new_faces == 'CANCELLED':
self.report({'WARNING'}, "More than 2 faces share edge")
return None
new_parsed_faces += new_faces
faces_to_parse = new_parsed_faces
return all_sorted_faces
def parse_faces(
check_face, face_stuff, used_verts, used_edges, all_sorted_faces,
uv_layer):
"""recurse faces around the new_grow only"""
new_shared_faces = []
for sorted_edge in face_stuff[1]:
shared_faces = sorted_edge.link_faces
if shared_faces:
if len(shared_faces) > 2:
bpy.ops.mesh.select_all(action='DESELECT')
for face_sel in shared_faces:
face_sel.select = True
shared_faces = []
return 'CANCELLED'
clear_shared_faces = get_new_shared_faces(
check_face, sorted_edge, shared_faces, all_sorted_faces.keys())
if clear_shared_faces:
shared_face = clear_shared_faces[0]
# get vertices of the edge
vert1 = sorted_edge.verts[0]
vert2 = sorted_edge.verts[1]
common.debug_print(face_stuff[0], vert1, vert2)
if face_stuff[0].index(vert1) > face_stuff[0].index(vert2):
vert1 = sorted_edge.verts[1]
vert2 = sorted_edge.verts[0]
common.debug_print(shared_face.verts, vert1, vert2)
new_face_stuff = get_other_verts_edges(
shared_face, vert1, vert2, sorted_edge, uv_layer)
all_sorted_faces[shared_face] = new_face_stuff
used_verts.update(shared_face.verts)
used_edges.update(shared_face.edges)
if common.DEBUG:
shared_face.select = True # test which faces are parsed
new_shared_faces.append(shared_face)
return new_shared_faces
def get_new_shared_faces(orig_face, shared_edge, check_faces, used_faces):
shared_faces = []
for face in check_faces:
is_shared_edge = shared_edge in face.edges
not_used = face not in used_faces
not_orig = face is not orig_face
not_hide = face.hide is False
if is_shared_edge and not_used and not_orig and not_hide:
shared_faces.append(face)
return shared_faces
def get_other_verts_edges(face, vert1, vert2, first_edge, uv_layer):
face_edges = [first_edge]
face_verts = [vert1, vert2]
face_loops = []
other_edges = [edge for edge in face.edges if edge not in face_edges]
for _ in range(len(other_edges)):
found_edge = None
# get sorted verts and edges
for edge in other_edges:
if face_verts[-1] in edge.verts:
other_vert = edge.other_vert(face_verts[-1])
if other_vert not in face_verts:
face_verts.append(other_vert)
found_edge = edge
if found_edge not in face_edges:
face_edges.append(edge)
break
other_edges.remove(found_edge)
# get sorted uvs
for vert in face_verts:
for loop in face.loops:
if loop.vert is vert:
face_loops.append(loop[uv_layer])
break
return [face_verts, face_edges, face_loops]
```
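For orientation, every value that `main_parse()` stores in `all_sorted_faces` is the three-element list built by `get_other_verts_edges()`: sorted vertices, sorted edges, and the matching UV-layer loops. A minimal sketch of unpacking one entry, assuming `all_sorted_faces` is the `OrderedDict` returned by `main_parse()` (it mirrors the access pattern of `MUV_TransUVCopy.execute` above):
```python
# Sketch only: consuming the OrderedDict produced by main_parse().
for face, face_data in all_sorted_faces.items():
    verts, edges, uv_loops = face_data            # order fixed by get_other_verts_edges()
    uvs = [l.uv.copy() for l in uv_loops]         # same access as in the copy operator
    pin_uvs = [l.pin_uv for l in uv_loops]
    seams = [e.seam for e in edges]
```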
#### File: blender-addons-master/uv_magic_uv/properites.py
```python
__author__ = "Nutti <<EMAIL>>"
__status__ = "production"
__version__ = "5.1"
__date__ = "24 Feb 2018"
import bpy
from bpy.props import (
FloatProperty,
EnumProperty,
BoolProperty,
FloatVectorProperty,
IntProperty
)
from mathutils import Vector
from . import common
def get_loaded_texture_name(_, __):
items = [(key, key, "") for key in bpy.data.images.keys()]
items.append(("None", "None", ""))
return items
# Properties used in this add-on.
class MUV_Properties():
cpuv = None
cpuv_obj = None
cpuv_selseq = None
transuv = None
uvbb = None
texlock = None
texproj = None
texwrap = None
mvuv = None
uvinsp = None
uvsculpt = None
def __init__(self):
self.cpuv = MUV_CPUVProps()
self.cpuv_obj = MUV_CPUVProps()
self.cpuv_selseq = MUV_CPUVSelSeqProps()
self.transuv = MUV_TransUVProps()
self.uvbb = MUV_UVBBProps()
self.texlock = MUV_TexLockProps()
self.texproj = MUV_TexProjProps()
self.texwrap = MUV_TexWrapProps()
self.mvuv = MUV_MVUVProps()
self.uvinsp = MUV_UVInspProps()
self.uvsculpt = MUV_UVSculptProps()
class MUV_CPUVProps():
src_uvs = []
src_pin_uvs = []
src_seams = []
class MUV_CPUVSelSeqProps():
src_uvs = []
src_pin_uvs = []
src_seams = []
class MUV_TransUVProps():
topology_copied = []
class MUV_TexProjProps():
running = False
class MUV_UVBBProps():
uv_info_ini = []
ctrl_points_ini = []
ctrl_points = []
running = False
class MUV_TexLockProps():
verts_orig = None
intr_verts_orig = None
intr_running = False
class MUV_TexWrapProps():
ref_face_index = -1
ref_obj = None
class MUV_MVUVProps():
running = False
class MUV_UVInspProps():
display_running = False
overlapped_info = []
flipped_info = []
class MUV_UVSculptProps():
running = False
def init_props(scene):
scene.muv_props = MUV_Properties()
# UV Sculpt
scene.muv_uvsculpt_enabled = BoolProperty(
name="UV Sculpt",
description="UV Sculpt is enabled",
default=False
)
scene.muv_uvsculpt_radius = IntProperty(
name="Radius",
description="Radius of the brush",
min=1,
max=500,
default=30
)
scene.muv_uvsculpt_strength = FloatProperty(
name="Strength",
description="How powerful the effect of the brush when applied",
min=0.0,
max=1.0,
default=0.03,
)
scene.muv_uvsculpt_tools = EnumProperty(
name="Tools",
description="Select Tools for the UV sculpt brushes",
items=[
('GRAB', "Grab", "Grab UVs"),
('RELAX', "Relax", "Relax UVs"),
('PINCH', "Pinch", "Pinch UVs")
],
default='GRAB'
)
scene.muv_uvsculpt_show_brush = BoolProperty(
name="Show Brush",
description="Show Brush",
default=True
)
scene.muv_uvsculpt_pinch_invert = BoolProperty(
name="Invert",
description="Pinch UV to invert direction",
default=False
)
scene.muv_uvsculpt_relax_method = EnumProperty(
name="Method",
description="Algorithm used for relaxation",
items=[
('HC', "HC", "Use HC method for relaxation"),
('LAPLACIAN', "Laplacian", "Use laplacian method for relaxation")
],
default='HC'
)
# Texture Wrap
scene.muv_texwrap_enabled = BoolProperty(
name="Texture Wrap",
description="Texture Wrap is enabled",
default=False
)
scene.muv_texwrap_set_and_refer = BoolProperty(
name="Set and Refer",
description="Refer and set UV",
default=True
)
scene.muv_texwrap_selseq = BoolProperty(
name="Selection Sequence",
description="Set UV sequentially",
default=False
)
# UV inspection
scene.muv_seluv_enabled = BoolProperty(
name="Select UV Enabled",
description="Select UV is enabled",
default=False
)
scene.muv_uvinsp_enabled = BoolProperty(
name="UV Inspection Enabled",
description="UV Inspection is enabled",
default=False
)
scene.muv_uvinsp_show_overlapped = BoolProperty(
name="Overlapped",
description="Show overlapped UVs",
default=False
)
scene.muv_uvinsp_show_flipped = BoolProperty(
name="Flipped",
description="Show flipped UVs",
default=False
)
scene.muv_uvinsp_show_mode = EnumProperty(
name="Mode",
description="Show mode",
items=[
('PART', "Part", "Show only overlapped/flipped part"),
('FACE', "Face", "Show overlapped/flipped face")
],
default='PART'
)
# Align UV
scene.muv_auv_enabled = BoolProperty(
name="Aline UV Enabled",
description="Align UV is enabled",
default=False
)
scene.muv_auv_transmission = BoolProperty(
name="Transmission",
description="Align linked UVs",
default=False
)
scene.muv_auv_select = BoolProperty(
name="Select",
description="Select UVs which are aligned",
default=False
)
scene.muv_auv_vertical = BoolProperty(
name="Vert-Infl (Vertical)",
description="Align vertical direction influenced "
"by mesh vertex proportion",
default=False
)
scene.muv_auv_horizontal = BoolProperty(
name="Vert-Infl (Horizontal)",
description="Align horizontal direction influenced "
"by mesh vertex proportion",
default=False
)
scene.muv_auv_location = EnumProperty(
name="Location",
description="Align location",
items=[
('LEFT_TOP', "Left/Top", "Align to Left or Top"),
('MIDDLE', "Middle", "Align to middle"),
('RIGHT_BOTTOM', "Right/Bottom", "Align to Right or Bottom")
],
default='MIDDLE'
)
# Smooth UV
scene.muv_smuv_enabled = BoolProperty(
name="Smooth UV Enabled",
description="Smooth UV is enabled",
default=False
)
scene.muv_smuv_transmission = BoolProperty(
name="Transmission",
description="Smooth linked UVs",
default=False
)
scene.muv_smuv_mesh_infl = FloatProperty(
name="Mesh Influence",
description="Influence rate of mesh vertex",
min=0.0,
max=1.0,
default=0.0
)
scene.muv_smuv_select = BoolProperty(
name="Select",
description="Select UVs which are smoothed",
default=False
)
# UV Bounding Box
scene.muv_uvbb_enabled = BoolProperty(
name="UV Bounding Box Enabled",
description="UV Bounding Box is enabled",
default=False
)
scene.muv_uvbb_uniform_scaling = BoolProperty(
name="Uniform Scaling",
description="Enable Uniform Scaling",
default=False
)
scene.muv_uvbb_boundary = EnumProperty(
name="Boundary",
description="Boundary",
default='UV_SEL',
items=[
('UV', "UV", "Boundary is decided by UV"),
('UV_SEL', "UV (Selected)", "Boundary is decided by Selected UV")
]
)
# Pack UV
scene.muv_packuv_enabled = BoolProperty(
name="Pack UV Enabled",
description="Pack UV is enabled",
default=False
)
scene.muv_packuv_allowable_center_deviation = FloatVectorProperty(
name="Allowable Center Deviation",
description="Allowable center deviation to judge same UV island",
min=0.000001,
max=0.1,
default=(0.001, 0.001),
size=2
)
scene.muv_packuv_allowable_size_deviation = FloatVectorProperty(
name="Allowable Size Deviation",
description="Allowable sizse deviation to judge same UV island",
min=0.000001,
max=0.1,
default=(0.001, 0.001),
size=2
)
# Move UV
scene.muv_mvuv_enabled = BoolProperty(
name="Move UV Enabled",
description="Move UV is enabled",
default=False
)
# UVW
scene.muv_uvw_enabled = BoolProperty(
name="UVW Enabled",
description="UVW is enabled",
default=False
)
scene.muv_uvw_assign_uvmap = BoolProperty(
name="Assign UVMap",
description="Assign UVMap when no UVmaps are available",
default=True
)
# Texture Projection
scene.muv_texproj_enabled = BoolProperty(
name="Texture Projection Enabled",
description="Texture Projection is enabled",
default=False
)
scene.muv_texproj_tex_magnitude = FloatProperty(
name="Magnitude",
description="Texture Magnitude",
default=0.5,
min=0.0,
max=100.0
)
scene.muv_texproj_tex_image = EnumProperty(
name="Image",
description="Texture Image",
items=get_loaded_texture_name
)
scene.muv_texproj_tex_transparency = FloatProperty(
name="Transparency",
description="Texture Transparency",
default=0.2,
min=0.0,
max=1.0
)
scene.muv_texproj_adjust_window = BoolProperty(
name="Adjust Window",
description="Size of renderered texture is fitted to window",
default=True
)
scene.muv_texproj_apply_tex_aspect = BoolProperty(
name="Texture Aspect Ratio",
description="Apply Texture Aspect ratio to displayed texture",
default=True
)
scene.muv_texproj_assign_uvmap = BoolProperty(
name="Assign UVMap",
description="Assign UVMap when no UVmaps are available",
default=True
)
# Texture Lock
scene.muv_texlock_enabled = BoolProperty(
name="Texture Lock Enabled",
description="Texture Lock is enabled",
default=False
)
scene.muv_texlock_connect = BoolProperty(
name="Connect UV",
default=True
)
# World Scale UV
scene.muv_wsuv_enabled = BoolProperty(
name="World Scale UV Enabled",
description="World Scale UV is enabled",
default=False
)
scene.muv_wsuv_src_mesh_area = FloatProperty(
name="Mesh Area",
description="Source Mesh Area",
default=0.0,
min=0.0
)
scene.muv_wsuv_src_uv_area = FloatProperty(
name="UV Area",
description="Source UV Area",
default=0.0,
min=0.0
)
scene.muv_wsuv_src_density = FloatProperty(
name="Density",
description="Source Texel Density",
default=0.0,
min=0.0
)
scene.muv_wsuv_tgt_density = FloatProperty(
name="Density",
description="Target Texel Density",
default=0.0,
min=0.0
)
scene.muv_wsuv_mode = EnumProperty(
name="Mode",
description="Density calculation mode",
items=[
('PROPORTIONAL', 'Proportional', 'Scale proportionally by mesh'),
('SCALING', 'Scaling', 'Specify scale factor'),
('USER', 'User', 'Specify density'),
('CONSTANT', 'Constant', 'Constant density')
],
default='CONSTANT'
)
scene.muv_wsuv_scaling_factor = FloatProperty(
name="Scaling Factor",
default=1.0,
max=1000.0,
min=0.00001
)
scene.muv_wsuv_origin = EnumProperty(
name="Origin",
description="Aspect Origin",
items=[
('CENTER', 'Center', 'Center'),
            ('LEFT_TOP', 'Left Top', 'Left Top'),
('LEFT_CENTER', 'Left Center', 'Left Center'),
('LEFT_BOTTOM', 'Left Bottom', 'Left Bottom'),
('CENTER_TOP', 'Center Top', 'Center Top'),
('CENTER_BOTTOM', 'Center Bottom', 'Center Bottom'),
('RIGHT_TOP', 'Right Top', 'Right Top'),
('RIGHT_CENTER', 'Right Center', 'Right Center'),
('RIGHT_BOTTOM', 'Right Bottom', 'Right Bottom')
],
default='CENTER'
)
# Unwrap Constraint
scene.muv_unwrapconst_enabled = BoolProperty(
name="Unwrap Constraint Enabled",
description="Unwrap Constraint is enabled",
default=False
)
scene.muv_unwrapconst_u_const = BoolProperty(
name="U-Constraint",
description="Keep UV U-axis coordinate",
default=False
)
scene.muv_unwrapconst_v_const = BoolProperty(
name="V-Constraint",
description="Keep UV V-axis coordinate",
default=False
)
# Preserve UV Aspect
scene.muv_preserve_uv_enabled = BoolProperty(
name="Preserve UV Aspect Enabled",
description="Preserve UV Aspect is enabled",
default=False
)
scene.muv_preserve_uv_tex_image = EnumProperty(
name="Image",
description="Texture Image",
items=get_loaded_texture_name
)
scene.muv_preserve_uv_origin = EnumProperty(
name="Origin",
description="Aspect Origin",
items=[
('CENTER', 'Center', 'Center'),
            ('LEFT_TOP', 'Left Top', 'Left Top'),
('LEFT_CENTER', 'Left Center', 'Left Center'),
('LEFT_BOTTOM', 'Left Bottom', 'Left Bottom'),
('CENTER_TOP', 'Center Top', 'Center Top'),
('CENTER_BOTTOM', 'Center Bottom', 'Center Bottom'),
('RIGHT_TOP', 'Right Top', 'Right Top'),
('RIGHT_CENTER', 'Right Center', 'Right Center'),
('RIGHT_BOTTOM', 'Right Bottom', 'Right Bottom')
],
default="CENTER"
)
# Flip/Rotate UV
scene.muv_fliprot_enabled = BoolProperty(
name="Flip/Rotate UV Enabled",
description="Flip/Rotate UV is enabled",
default=False
)
scene.muv_fliprot_seams = BoolProperty(
name="Seams",
description="Seams",
default=True
)
# Mirror UV
scene.muv_mirroruv_enabled = BoolProperty(
name="Mirror UV Enabled",
description="Mirror UV is enabled",
default=False
)
scene.muv_mirroruv_axis = EnumProperty(
items=[
('X', "X", "Mirror Along X axis"),
('Y', "Y", "Mirror Along Y axis"),
('Z', "Z", "Mirror Along Z axis")
],
name="Axis",
description="Mirror Axis",
default='X'
)
# Copy/Paste UV
scene.muv_cpuv_enabled = BoolProperty(
name="Copy/Paste UV Enabled",
description="Copy/Paste UV is enabled",
default=False
)
scene.muv_cpuv_copy_seams = BoolProperty(
name="Copy Seams",
description="Copy Seams",
default=True
)
scene.muv_cpuv_mode = EnumProperty(
items=[
('DEFAULT', "Default", "Default Mode"),
('SEL_SEQ', "Selection Sequence", "Selection Sequence Mode")
],
name="Copy/Paste UV Mode",
description="Copy/Paste UV Mode",
default='DEFAULT'
)
scene.muv_cpuv_strategy = EnumProperty(
name="Strategy",
description="Paste Strategy",
items=[
('N_N', 'N:N', 'Number of faces must be equal to source'),
            ('N_M', 'N:M', 'Number of faces need not be equal to source')
],
default='N_M'
)
# Transfer UV
scene.muv_transuv_enabled = BoolProperty(
name="Transfer UV Enabled",
description="Transfer UV is enabled",
default=False
)
scene.muv_transuv_invert_normals = BoolProperty(
name="Invert Normals",
description="Invert Normals",
default=False
)
scene.muv_transuv_copy_seams = BoolProperty(
name="Copy Seams",
description="Copy Seams",
default=True
)
# Align UV Cursor
def auvc_get_cursor_loc(self):
area, _, space = common.get_space('IMAGE_EDITOR', 'WINDOW',
'IMAGE_EDITOR')
bd_size = common.get_uvimg_editor_board_size(area)
loc = space.cursor_location
if bd_size[0] < 0.000001:
cx = 0.0
else:
cx = loc[0] / bd_size[0]
if bd_size[1] < 0.000001:
cy = 0.0
else:
cy = loc[1] / bd_size[1]
self['muv_auvc_cursor_loc'] = Vector((cx, cy))
return self.get('muv_auvc_cursor_loc', (0.0, 0.0))
def auvc_set_cursor_loc(self, value):
self['muv_auvc_cursor_loc'] = value
area, _, space = common.get_space('IMAGE_EDITOR', 'WINDOW',
'IMAGE_EDITOR')
bd_size = common.get_uvimg_editor_board_size(area)
cx = bd_size[0] * value[0]
cy = bd_size[1] * value[1]
space.cursor_location = Vector((cx, cy))
scene.muv_auvc_enabled = BoolProperty(
name="Align UV Cursor Enabled",
description="Align UV Cursor is enabled",
default=False
)
scene.muv_auvc_cursor_loc = FloatVectorProperty(
name="UV Cursor Location",
size=2,
precision=4,
soft_min=-1.0,
soft_max=1.0,
step=1,
default=(0.000, 0.000),
get=auvc_get_cursor_loc,
set=auvc_set_cursor_loc
)
scene.muv_auvc_align_menu = EnumProperty(
name="Align Method",
description="Align Method",
default='TEXTURE',
items=[
('TEXTURE', "Texture", "Align to texture"),
('UV', "UV", "Align to UV"),
('UV_SEL', "UV (Selected)", "Align to Selected UV")
]
)
# UV Cursor Location
scene.muv_uvcloc_enabled = BoolProperty(
name="UV Cursor Location Enabled",
description="UV Cursor Location is enabled",
default=False
)
def clear_props(scene):
del scene.muv_props
# UV Sculpt
del scene.muv_uvsculpt_enabled
del scene.muv_uvsculpt_radius
del scene.muv_uvsculpt_strength
del scene.muv_uvsculpt_tools
del scene.muv_uvsculpt_show_brush
del scene.muv_uvsculpt_pinch_invert
del scene.muv_uvsculpt_relax_method
# Texture Wrap
del scene.muv_texwrap_enabled
del scene.muv_texwrap_set_and_refer
del scene.muv_texwrap_selseq
# UV Inspection
del scene.muv_seluv_enabled
del scene.muv_uvinsp_enabled
del scene.muv_uvinsp_show_overlapped
del scene.muv_uvinsp_show_flipped
del scene.muv_uvinsp_show_mode
# Align UV
del scene.muv_auv_enabled
del scene.muv_auv_transmission
del scene.muv_auv_select
del scene.muv_auv_vertical
del scene.muv_auv_horizontal
del scene.muv_auv_location
# Smooth UV
del scene.muv_smuv_enabled
del scene.muv_smuv_transmission
del scene.muv_smuv_mesh_infl
del scene.muv_smuv_select
# UV Bounding Box
del scene.muv_uvbb_enabled
del scene.muv_uvbb_uniform_scaling
del scene.muv_uvbb_boundary
# Pack UV
del scene.muv_packuv_enabled
del scene.muv_packuv_allowable_center_deviation
del scene.muv_packuv_allowable_size_deviation
# Move UV
del scene.muv_mvuv_enabled
# UVW
del scene.muv_uvw_enabled
del scene.muv_uvw_assign_uvmap
# Texture Projection
del scene.muv_texproj_enabled
del scene.muv_texproj_tex_magnitude
del scene.muv_texproj_tex_image
del scene.muv_texproj_tex_transparency
del scene.muv_texproj_adjust_window
del scene.muv_texproj_apply_tex_aspect
del scene.muv_texproj_assign_uvmap
# Texture Lock
del scene.muv_texlock_enabled
del scene.muv_texlock_connect
# World Scale UV
del scene.muv_wsuv_enabled
del scene.muv_wsuv_src_mesh_area
del scene.muv_wsuv_src_uv_area
del scene.muv_wsuv_src_density
del scene.muv_wsuv_tgt_density
del scene.muv_wsuv_mode
del scene.muv_wsuv_scaling_factor
del scene.muv_wsuv_origin
# Unwrap Constraint
del scene.muv_unwrapconst_enabled
del scene.muv_unwrapconst_u_const
del scene.muv_unwrapconst_v_const
# Preserve UV Aspect
del scene.muv_preserve_uv_enabled
del scene.muv_preserve_uv_tex_image
del scene.muv_preserve_uv_origin
# Flip/Rotate UV
del scene.muv_fliprot_enabled
del scene.muv_fliprot_seams
# Mirror UV
del scene.muv_mirroruv_enabled
del scene.muv_mirroruv_axis
# Copy/Paste UV
del scene.muv_cpuv_enabled
del scene.muv_cpuv_copy_seams
del scene.muv_cpuv_mode
del scene.muv_cpuv_strategy
# Transfer UV
del scene.muv_transuv_enabled
del scene.muv_transuv_invert_normals
del scene.muv_transuv_copy_seams
# Align UV Cursor
del scene.muv_auvc_enabled
del scene.muv_auvc_cursor_loc
del scene.muv_auvc_align_menu
# UV Cursor Location
del scene.muv_uvcloc_enabled
```
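`init_props()` attaches all of these settings to a scene-like target and `clear_props()` removes them again. A hedged sketch of the intended call sites, assuming they are wired into the add-on's `register()`/`unregister()` with `bpy.types.Scene` (the surrounding `__init__.py` is not shown in this file):
```python
# Hypothetical register/unregister hooks for the functions defined above.
import bpy
from . import properites  # the module really is spelled "properites.py"

def register():
    properites.init_props(bpy.types.Scene)   # make the scene properties available

def unregister():
    properites.clear_props(bpy.types.Scene)  # remove them when the add-on is disabled
```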
#### File: uv_magic_uv/ui/view3d_uv_mapping.py
```python
__author__ = "Nutti <<EMAIL>>"
__status__ = "production"
__version__ = "5.1"
__date__ = "24 Feb 2018"
import bpy
from ..op import texture_projection
from ..op import unwrap_constraint
from ..op import uvw
class OBJECT_PT_MUV_UVMapping(bpy.types.Panel):
"""
Panel class: UV Mapping on Property Panel on View3D
"""
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_label = "UV Mapping"
bl_category = "Magic UV"
bl_context = 'mesh_edit'
bl_options = {'DEFAULT_CLOSED'}
def draw_header(self, _):
layout = self.layout
layout.label(text="", icon='IMAGE_COL')
def draw(self, context):
sc = context.scene
props = sc.muv_props
layout = self.layout
box = layout.box()
box.prop(sc, "muv_unwrapconst_enabled", text="Unwrap Constraint")
if sc.muv_unwrapconst_enabled:
ops = box.operator(
unwrap_constraint.MUV_UnwrapConstraint.bl_idname,
text="Unwrap")
ops.u_const = sc.muv_unwrapconst_u_const
ops.v_const = sc.muv_unwrapconst_v_const
row = box.row(align=True)
row.prop(sc, "muv_unwrapconst_u_const", text="U-Constraint")
row.prop(sc, "muv_unwrapconst_v_const", text="V-Constraint")
box = layout.box()
box.prop(sc, "muv_texproj_enabled", text="Texture Projection")
if sc.muv_texproj_enabled:
row = box.row()
if not props.texproj.running:
row.operator(texture_projection.MUV_TexProjStart.bl_idname,
text="Start", icon='PLAY')
else:
row.operator(texture_projection.MUV_TexProjStop.bl_idname,
text="Stop", icon='PAUSE')
row.prop(sc, "muv_texproj_tex_image", text="")
box.prop(sc, "muv_texproj_tex_transparency", text="Transparency")
col = box.column(align=True)
row = col.row()
row.prop(sc, "muv_texproj_adjust_window", text="Adjust Window")
if not sc.muv_texproj_adjust_window:
row.prop(sc, "muv_texproj_tex_magnitude", text="Magnitude")
col.prop(sc, "muv_texproj_apply_tex_aspect",
text="Texture Aspect Ratio")
col.prop(sc, "muv_texproj_assign_uvmap", text="Assign UVMap")
if props.texproj.running:
box.operator(texture_projection.MUV_TexProjProject.bl_idname,
text="Project")
box = layout.box()
box.prop(sc, "muv_uvw_enabled", text="UVW")
if sc.muv_uvw_enabled:
row = box.row(align=True)
ops = row.operator(uvw.MUV_UVWBoxMap.bl_idname, text="Box")
ops.assign_uvmap = sc.muv_uvw_assign_uvmap
ops = row.operator(uvw.MUV_UVWBestPlanerMap.bl_idname,
text="Best Planner")
ops.assign_uvmap = sc.muv_uvw_assign_uvmap
box.prop(sc, "muv_uvw_assign_uvmap", text="Assign UVMap")
``` |
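The panel only appears once its class is registered; in the real add-on this happens centrally, so the import path below is an assumption made purely for a stand-alone sketch:
```python
# Hypothetical manual registration of the panel defined above.
import bpy
from uv_magic_uv.ui.view3d_uv_mapping import OBJECT_PT_MUV_UVMapping

def register():
    bpy.utils.register_class(OBJECT_PT_MUV_UVMapping)

def unregister():
    bpy.utils.unregister_class(OBJECT_PT_MUV_UVMapping)
```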
{
"source": "1minus1/porespy",
"score": 2
} |
#### File: porespy/metrics/__funcs__.py
```python
import numpy as np
import warnings
from skimage.measure import regionprops
import scipy.ndimage as spim
import scipy.spatial as sptl
from porespy.tools import extend_slice, mesh_region
from porespy.filters import find_dt_artifacts
from collections import namedtuple
from tqdm import tqdm
from scipy import fftpack as sp_ft
from skimage import measure
def representative_elementary_volume(im, npoints=1000):
r"""
    Calculates the porosity of the image as a function of subdomain size. This
function extracts a specified number of subdomains of random size, then
finds their porosity.
Parameters
----------
im : ND-array
The image of the porous material
npoints : int
The number of randomly located and sized boxes to sample. The default
is 1000.
Returns
-------
result : named_tuple
A tuple containing the *volume* and *porosity* of each subdomain
tested in arrays ``npoints`` long. They can be accessed as
attributes of the tuple. They can be conveniently plotted
by passing the tuple to matplotlib's ``plot`` function using the
\* notation: ``plt.plot(*result, 'b.')``. The resulting plot is
similar to the sketch given by Bachmat and Bear [1]
Notes
-----
This function is frustratingly slow. Profiling indicates that all the time
    is spent on numpy's ``sum`` function which is needed to sum the number of
void voxels (1's) in each subdomain.
Also, this function is a prime target for parallelization since the
    ``npoints`` are calculated independently.
References
----------
[1] Bachmat and Bear. On the Concept and Size of a Representative
Elementary Volume (Rev), Advances in Transport Phenomena in Porous Media
(1987)
"""
im_temp = np.zeros_like(im)
crds = np.array(np.random.rand(npoints, im.ndim)*im.shape, dtype=int)
pads = np.array(np.random.rand(npoints)*np.amin(im.shape)/2+10, dtype=int)
im_temp[tuple(crds.T)] = True
labels, N = spim.label(input=im_temp)
slices = spim.find_objects(input=labels)
porosity = np.zeros(shape=(N,), dtype=float)
volume = np.zeros(shape=(N,), dtype=int)
for i in tqdm(np.arange(0, N)):
s = slices[i]
p = pads[i]
new_s = extend_slice(s, shape=im.shape, pad=p)
temp = im[new_s]
Vp = np.sum(temp)
Vt = np.size(temp)
porosity[i] = Vp/Vt
volume[i] = Vt
profile = namedtuple('profile', ('volume', 'porosity'))
profile.volume = volume
profile.porosity = porosity
return profile
def porosity_profile(im, axis):
r"""
Returns a porosity profile along the specified axis
Parameters
----------
im : ND-array
The volumetric image for which to calculate the porosity profile
axis : int
The axis (0, 1, or 2) along which to calculate the profile. For
instance, if `axis` is 0, then the porosity in each YZ plane is
calculated and returned as 1D array with 1 value for each X position.
Returns
-------
result : 1D-array
        A 1D-array of porosity (in percent) along the specified axis
"""
if axis >= im.ndim:
raise Exception('axis out of range')
im = np.atleast_3d(im)
a = set(range(im.ndim)).difference(set([axis]))
a1, a2 = a
prof = np.sum(np.sum(im, axis=a2), axis=a1)/(im.shape[a2]*im.shape[a1])
return prof*100
def radial_density(im, bins=10, voxel_size=1):
r"""
Computes radial density function by analyzing the histogram of voxel
values in the distance transform. This function is defined by
Torquato [1] as:
.. math::
\int_0^\infty P(r)dr = 1.0
    where *P(r)dr* is the probability of finding a voxel lying at a radial
    distance between *r* and *r + dr* from the solid interface. This is equivalent
to a probability density function (*pdf*)
The cumulative distribution is defined as:
.. math::
F(r) = \int_r^\infty P(r)dr
which gives the fraction of pore-space with a radius larger than *r*. This
is equivalent to the cumulative distribution function (*cdf*).
Parameters
----------
im : ND-array
Either a binary image of the pore space with ``True`` indicating the
pore phase (or phase of interest), or a pre-calculated distance
transform which can save time.
bins : int or array_like
        The number of bins (if int) or the locations of the bins (if array).
        This argument is passed directly to numpy's ``histogram`` function so
        see that docstring for more information. The default is 10 bins, which
        produces a relatively smooth distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A named-tuple containing several 1D arrays:
*R* - radius, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
Notes
-----
This function should not be taken as a pore size distribution in the
    explicit sense, but rather an indicator of the sizes in the image. The
distance transform contains a very skewed number of voxels with small
values near the solid walls. Nonetheless, it does provide a useful
    indicator and its mathematical formalism is handy.
Torquato refers to this as the *pore-size density function*, and mentions
that it is also known as the *pore-size distribution function*. These
terms are avoided here since they have specific connotations in porous
media analysis.
References
----------
    [1] <NAME>. Random Heterogeneous Materials: Microstructure and
Macroscopic Properties. Springer, New York (2002) - See page 48 & 292
"""
if im.dtype == bool:
im = spim.distance_transform_edt(im)
mask = find_dt_artifacts(im) == 0
im[mask] = 0
x = im[im > 0].flatten()
h = np.histogram(x, bins=bins, density=True)
h = _parse_histogram(h=h, voxel_size=voxel_size)
rdf = namedtuple('radial_density_function',
('R', 'pdf', 'cdf', 'bin_centers', 'bin_edges',
'bin_widths'))
return rdf(h.bin_centers, h.pdf, h.cdf, h.bin_centers, h.bin_edges,
h.bin_widths)
def porosity(im):
r"""
Calculates the porosity of an image assuming 1's are void space and 0's are
solid phase.
All other values are ignored, so this can also return the relative
fraction of a phase of interest in trinary or multiphase images.
Parameters
----------
im : ND-array
Image of the void space with 1's indicating void phase (or True) and
0's indicating the solid phase (or False).
Returns
-------
porosity : float
Calculated as the sum of all 1's divided by the sum of all 1's and 0's.
See Also
--------
phase_fraction
Notes
-----
This function assumes void is represented by 1 and solid by 0, and all
other values are ignored. This is useful, for example, for images of
cylindrical cores, where all voxels outside the core are labelled with 2.
Alternatively, images can be processed with ``find_disconnected_voxels``
to get an image of only blind pores. This can then be added to the original
image such that blind pores have a value of 2, thus allowing the
calculation of accessible porosity, rather than overall porosity.
"""
im = np.array(im, dtype=int)
Vp = np.sum(im == 1)
Vs = np.sum(im == 0)
e = Vp/(Vs + Vp)
return e
def two_point_correlation_bf(im, spacing=10):
r"""
Calculates the two-point correlation function using brute-force (see Notes)
Parameters
----------
im : ND-array
The image of the void space on which the 2-point correlation is desired
spacing : int
The space between points on the regular grid that is used to generate
the correlation (see Notes)
Returns
-------
result : named_tuple
A tuple containing the x and y data for plotting the two-point
correlation function, using the *args feature of matplotlib's plot
function. The x array is the distances between points and the y array
is corresponding probabilities that points of a given distance both
lie in the void space. The distance values are binned as follows:
``bins = range(0, int(np.amin(im.shape)/2), spacing)``
Notes
-----
The brute-force approach means overlaying a grid of equally spaced points
onto the image, calculating the distance between each and every pair of
points, then counting the instances where both pairs lie in the void space.
This approach uses a distance matrix so can consume memory very quickly for
large 3D images and/or close spacing.
"""
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if im.ndim == 2:
pts = np.meshgrid(range(0, im.shape[0], spacing),
range(0, im.shape[1], spacing))
crds = np.vstack([pts[0].flatten(),
pts[1].flatten()]).T
elif im.ndim == 3:
pts = np.meshgrid(range(0, im.shape[0], spacing),
range(0, im.shape[1], spacing),
range(0, im.shape[2], spacing))
crds = np.vstack([pts[0].flatten(),
pts[1].flatten(),
pts[2].flatten()]).T
dmat = sptl.distance.cdist(XA=crds, XB=crds)
hits = im[tuple(pts)].flatten()
dmat = dmat[hits, :]
h1 = np.histogram(dmat, bins=range(0, int(np.amin(im.shape)/2), spacing))
dmat = dmat[:, hits]
h2 = np.histogram(dmat, bins=h1[1])
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(h2[1][:-1], h2[0]/h1[0])
def _radial_profile(autocorr, r_max, nbins=100):
r"""
Helper function to calculate the radial profile of the autocorrelation
Masks the image in radial segments from the center and averages the values
The distance values are normalized and 100 bins are used as default.
Parameters
----------
autocorr : ND-array
The image of autocorrelation produced by FFT
r_max : int or float
The maximum radius in pixels to sum the image over
Returns
-------
result : named_tuple
A named tuple containing an array of radial position bins (``distance``)
and an array of the normalized autocorrelation values (``probability``) in each bin.
"""
if len(autocorr.shape) == 2:
adj = np.reshape(autocorr.shape, [2, 1, 1])
inds = np.indices(autocorr.shape) - adj/2
dt = np.sqrt(inds[0]**2 + inds[1]**2)
elif len(autocorr.shape) == 3:
adj = np.reshape(autocorr.shape, [3, 1, 1, 1])
inds = np.indices(autocorr.shape) - adj/2
dt = np.sqrt(inds[0]**2 + inds[1]**2 + inds[2]**2)
else:
raise Exception('Image dimensions must be 2 or 3')
bin_size = int(np.ceil(r_max/nbins))
bins = np.arange(bin_size, r_max, step=bin_size)
radial_sum = np.zeros_like(bins, dtype=float)
for i, r in enumerate(bins):
# Generate Radial Mask from dt using bins
mask = (dt <= r) * (dt > (r-bin_size))
radial_sum[i] = np.sum(autocorr[mask])/np.sum(mask)
# Return normalized bin and radially summed autoc
norm_autoc_radial = radial_sum/np.max(autocorr)
tpcf = namedtuple('two_point_correlation_function',
('distance', 'probability'))
return tpcf(bins, norm_autoc_radial)
def two_point_correlation_fft(im):
r"""
Calculates the two-point correlation function using Fourier transforms
Parameters
----------
im : ND-array
The image of the void space on which the 2-point correlation is desired
Returns
-------
result : named_tuple
A tuple containing the x and y data for plotting the two-point
correlation function, using the *args feature of matplotlib's plot
function. The x array is the distances between points and the y array
is corresponding probabilities that points of a given distance both
lie in the void space.
Notes
-----
The Fourier transform approach utilizes the fact that the autocorrelation
function is the inverse FT of the power spectrum density.
For background read the Scipy fftpack docs and for a good explanation see:
http://www.ucl.ac.uk/~ucapikr/projects/KamilaSuankulova_BSc_Project.pdf
"""
# Calculate half lengths of the image
hls = (np.ceil(np.shape(im))/2).astype(int)
# Fourier Transform and shift image
F = sp_ft.ifftshift(sp_ft.fftn(sp_ft.fftshift(im)))
# Compute Power Spectrum
P = np.absolute(F**2)
# Auto-correlation is inverse of Power Spectrum
autoc = np.absolute(sp_ft.ifftshift(sp_ft.ifftn(sp_ft.fftshift(P))))
tpcf = _radial_profile(autoc, r_max=np.min(hls))
return tpcf
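# --- Hypothetical usage sketch (not part of the original source) ---
# As the docstring notes, the returned tuple can be unpacked straight into
# matplotlib's ``plot`` (assuming ``plt`` is available):
#
#   tpc = two_point_correlation_fft(im)   # im: boolean image of the void space
#   plt.plot(*tpc)                        # distance vs. probability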
def pore_size_distribution(im, bins=10, log=True, voxel_size=1):
r"""
Calculate a pore-size distribution based on the image produced by the
``porosimetry`` or ``local_thickness`` functions.
Parameters
----------
im : ND-array
The array containing the sizes of the largest sphere that overlaps
each voxel. Obtained from either ``porosimetry`` or
``local_thickness``.
bins : scalar or array_like
Either an array of bin sizes to use, or the number of bins that should
be automatically generated that span the data range.
log : boolean
If ``True`` (default) the size data is converted to log (base-10)
values before processing. This can help to plot wide size
distributions or to better visualize the small size region.
Note that you can anti-log the radii values in the returned ``tuple``,
but the binning is performed on the logged radii values.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A named-tuple containing several values:
*R* or *logR* - radius, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*satn* - phase saturation in differential form. For the cumulative
saturation, just use *cdf* which is already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
Notes
-----
(1) To ensure the returned values represent actual sizes you can manually
scale the input image by the voxel size first (``im *= voxel_size``)
(2) The results can be plotted as a bar chart, for instance:
``plt.bar(psd.R, psd.satn, width=psd.bin_widths, edgecolor='k')``
"""
im = im.flatten()
vals = im[im > 0]*voxel_size
if log:
vals = np.log10(vals)
h = _parse_histogram(np.histogram(vals, bins=bins, density=True))
psd = namedtuple('pore_size_distribution',
(log*'log' + 'R', 'pdf', 'cdf', 'satn',
'bin_centers', 'bin_edges', 'bin_widths'))
return psd(h.bin_centers, h.pdf, h.cdf, h.relfreq,
h.bin_centers, h.bin_edges, h.bin_widths)
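# --- Hypothetical usage sketch (not part of the original source) ---
# ``local_thickness`` is only referenced by name in the docstring above; its
# import location and exact signature are assumed here.  A typical workflow:
#
#   lt = local_thickness(im)                       # sphere-radius map (assumed)
#   psd = pore_size_distribution(lt, bins=25, log=True, voxel_size=1e-6)
#   plt.bar(psd.logR, psd.pdf, width=psd.bin_widths, edgecolor='k')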
def _parse_histogram(h, voxel_size=1):
delta_x = h[1]
P = h[0]
temp = P*(delta_x[1:] - delta_x[:-1])
C = np.cumsum(temp[-1::-1])[-1::-1]
S = P*(delta_x[1:] - delta_x[:-1])
bin_edges = delta_x * voxel_size
bin_widths = (delta_x[1:] - delta_x[:-1]) * voxel_size
bin_centers = ((delta_x[1:] + delta_x[:-1])/2) * voxel_size
psd = namedtuple('histogram', ('pdf', 'cdf', 'relfreq',
'bin_centers', 'bin_edges', 'bin_widths'))
return psd(P, C, S, bin_centers, bin_edges, bin_widths)
def chord_counts(im):
r"""
Finds the length of each chord in the supplied image and returns a list
of their individual sizes
Parameters
----------
im : ND-array
An image containing chords drawn in the void space.
Returns
-------
result : 1D-array
A 1D array with one element for each chord, containing its length.
Notes
-----
The returned array can be passed to ``plt.hist`` to plot the histogram,
or to ``np.histogram`` to get the histogram data directly. Another useful
function is ``np.bincount`` which gives the number of chords of each
length in a format suitable for ``plt.plot``.
"""
labels, N = spim.label(im > 0)
props = regionprops(labels, coordinates='xy')
chord_lens = np.array([i.filled_area for i in props])
return chord_lens
def linear_density(im, bins=25, voxel_size=1, log=False):
r"""
Determines the probability that a point lies within a certain distance
of the opposite phase *along a specified direction*
This relates directly to the radial density function defined by Torquato [1],
but instead of reporting the probability of lying within a stated distance
to the nearest solid in any direction, it considers only linear distances
along orthogonal directions. The benefit of this is that anisotropy can be
detected in materials by performing the analysis in multiple orthogonal
directions.
Parameters
----------
im : ND-array
An image with each voxel containing the distance to the nearest solid
along a linear path, as produced by ``distance_transform_lin``.
bins : int or array_like
The number of bins or a list of specific bins to use
voxel_size : scalar
The side length of a voxel. This is used to scale the chord lengths
into real units. Note this is applied *after* the binning, so
``bins``, if supplied, should be in terms of voxels, not length units.
Returns
-------
result : named_tuple
A named-tuple containing the bin centers (``L``), along with ``pdf``, ``cdf``,
``relfreq``, ``bin_centers``, ``bin_edges`` and ``bin_widths`` arrays.
References
----------
[1] Torquato, S. Random Heterogeneous Materials: Microstructure and
Macroscopic Properties. Springer, New York (2002)
"""
x = im[im > 0]
h = list(np.histogram(x, bins=bins, density=True))
h = _parse_histogram(h=h, voxel_size=voxel_size)
cld = namedtuple('linear_density_function',
('L', 'pdf', 'cdf', 'relfreq',
'bin_centers', 'bin_edges', 'bin_widths'))
return cld(h.bin_centers, h.pdf, h.cdf, h.relfreq,
h.bin_centers, h.bin_edges, h.bin_widths)
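# --- Hypothetical usage sketch (not part of the original source) ---
# ``distance_transform_lin`` is only referenced by name in the docstring; the
# ``axis`` argument shown here is an assumption.  Repeating the call along
# different axes is what exposes anisotropy:
#
#   dt_x = distance_transform_lin(im, axis=0)      # assumed signature
#   ld_x = linear_density(dt_x, bins=25)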
def chord_length_distribution(im, bins=None, log=False, voxel_size=1,
normalization='count'):
r"""
Determines the distribution of chord lengths in an image containing chords.
Parameters
----------
im : ND-image
An image with chords drawn in the pore space, as produced by
``apply_chords`` or ``apply_chords_3d``.
``im`` can be either boolean, in which case each chord will be
identified using ``scipy.ndimage.label``, or numerical values in which
case it is assumed that chords have already been identified and labeled.
In both cases, the size of each chord will be computed as the number
of voxels belonging to each labelled region.
bins : scalar or array_like
If a scalar is given it is interpreted as the number of bins to use,
and if an array is given they are used as the bins directly.
log : Boolean
If true, the logarithm of the chord lengths will be used, which can
make the data more clear.
normalization : string
Indicates how to normalize the bin heights. Options are:
*'count' or 'number'* - (default) This simply counts the number of
chords in each bin in the normal sense of a histogram. This is the
rigorous definition according to Torquato [1].
*'length'* - This multiplies the number of chords in each bin by the
chord length (i.e. bin size). The normalization scheme accounts for
the fact that long chords are less frequent than shorter chords,
thus giving a more balanced distribution.
voxel_size : scalar
The size of a voxel side in preferred units. The default is 1, so the
user can apply the scaling to the returned results after the fact.
Returns
-------
result : named_tuple
A tuple containing the following elements, which can be retrieved by
attribute name:
*L* or *logL* - chord length, equivalent to ``bin_centers``
*pdf* - probability density function
*cdf* - cumulative density function
*relfreq* - relative frequency of chords in each bin. The sum of all bin
heights is 1.0. For the cumulative relative frequency, use *cdf* which is
already normalized to 1.
*bin_centers* - the center point of each bin
*bin_edges* - locations of bin divisions, including 1 more value than
the number of bins
*bin_widths* - useful for passing to the ``width`` argument of
``matplotlib.pyplot.bar``
References
----------
[1] <NAME>. Random Heterogeneous Materials: Microstructure and
Macroscopic Properties. Springer, New York (2002) - See page 45 & 292
"""
x = chord_counts(im)
if bins is None:
bins = np.array(range(0, x.max()+2))*voxel_size
x = x*voxel_size
if log:
x = np.log10(x)
if normalization == 'length':
h = list(np.histogram(x, bins=bins, density=False))
h[0] = h[0]*(h[1][1:]+h[1][:-1])/2 # Scale bin heights by length
h[0] = h[0]/h[0].sum()/(h[1][1:]-h[1][:-1]) # Normalize h[0] manually
elif normalization in ['number', 'count']:
h = np.histogram(x, bins=bins, density=True)
else:
raise Exception('Unsupported normalization:', normalization)
h = _parse_histogram(h)
cld = namedtuple('chord_length_distribution',
(log*'log' + 'L', 'pdf', 'cdf', 'relfreq',
'bin_centers', 'bin_edges', 'bin_widths'))
return cld(h.bin_centers, h.pdf, h.cdf, h.relfreq,
h.bin_centers, h.bin_edges, h.bin_widths)
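# --- Hypothetical usage sketch (not part of the original source) ---
# ``apply_chords`` is only referenced by name in the docstring; the ``axis``
# argument is an assumption.  The chord-length distribution then follows as:
#
#   crds = apply_chords(im, axis=0)                # assumed signature
#   cld = chord_length_distribution(crds, bins=25, normalization='length')
#   plt.bar(cld.L, cld.relfreq, width=cld.bin_widths, edgecolor='k')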
def region_interface_areas(regions, areas, voxel_size=1, strel=None):
r"""
Calculates the interfacial area between all pairs of adjacent regions
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
areas : array_like
A list containing the areas of each region, as determined by
``region_surface_area``. Note that the region number and list index
are offset by 1, such that the area for region 1 is stored in
``areas[0]``.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : named_tuple
A named-tuple containing 2 arrays. ``conns`` holds the connectivity
information and ``area`` holds the result for each pair. ``conns`` is
an N-regions by 2 array with each row containing the region number of an
adjacent pair of regions. For instance, if ``conns[0, 0]`` is 0 and
``conns[0, 1]`` is 5, then row 0 of ``area`` contains the interfacial
area shared by regions 0 and 5.
"""
print('_'*60)
print('Finding interfacial areas between each region')
from skimage.morphology import disk, square, ball, cube
im = regions.copy()
if im.ndim != im.squeeze().ndim:
warnings.warn('Input image contains a singleton axis:' + str(im.shape) +
' Reduce dimensionality with np.squeeze(im) to avoid' +
' unexpected behavior.')
if im.ndim == 2:
cube = square
ball = disk
# Get 'slices' into im for each region
slices = spim.find_objects(im)
# Initialize arrays
Ps = np.arange(1, np.amax(im)+1)
sa = np.zeros_like(Ps, dtype=float)
sa_combined = [] # Difficult to preallocate since number of conns unknown
cn = []
# Start extracting area from im
for i in tqdm(Ps):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
sa[reg] = areas[reg]
im_w_throats = spim.binary_dilation(input=mask_im,
structure=ball(1))
im_w_throats = im_w_throats*sub_im
Pn = np.unique(im_w_throats)[1:] - 1
for j in Pn:
if j > reg:
cn.append([reg, j])
merged_region = im[(min(slices[reg][0].start,
slices[j][0].start)):
max(slices[reg][0].stop,
slices[j][0].stop),
(min(slices[reg][1].start,
slices[j][1].start)):
max(slices[reg][1].stop,
slices[j][1].stop)]
merged_region = ((merged_region == reg + 1) +
(merged_region == j + 1))
mesh = mesh_region(region=merged_region, strel=strel)
sa_combined.append(mesh_surface_area(mesh))
# Interfacial area calculation
cn = np.array(cn)
ia = 0.5 * (sa[cn[:, 0]] + sa[cn[:, 1]] - sa_combined)
ia[ia <= 0] = 1
interfacial_areas = namedtuple('interfacial_areas', ('conns', 'area'))
result = interfacial_areas(conns=cn, area=ia * voxel_size**2)
return result
def region_surface_areas(regions, voxel_size=1, strel=None):
r"""
Extracts the surface area of each region in a labeled image.
Optionally, it can also find the interfacial area between all
adjoining regions.
Parameters
----------
regions : ND-array
An image of the pore space partitioned into individual pore regions.
Note that zeros in the image will not be considered for area
calculation.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1.
strel : array_like
The structuring element used to blur the region. If not provided,
then a spherical element (or disk) with radius 1 is used. See the
docstring for ``mesh_region`` for more details, as this argument is
passed to there.
Returns
-------
result : list
A list containing the surface area of each region, offset by 1, such
that the surface area of region 1 is stored in element 0 of the list.
"""
print('_'*60)
print('Finding surface area of each region')
im = regions.copy()
# Get 'slices' into im for each pore region
slices = spim.find_objects(im)
# Initialize arrays
Ps = np.arange(1, np.amax(im)+1)
sa = np.zeros_like(Ps, dtype=float)
# Start extracting marching cube area from im
for i in tqdm(Ps):
reg = i - 1
if slices[reg] is not None:
s = extend_slice(slices[reg], im.shape)
sub_im = im[s]
mask_im = sub_im == i
mesh = mesh_region(region=mask_im, strel=strel)
sa[reg] = mesh_surface_area(mesh)
result = sa * voxel_size**2
return result
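# --- Hypothetical usage sketch (not part of the original source) ---
# ``regions`` is a labelled image, e.g. the watershed produced by
# ``snow_partitioning``.  Mirroring how the ``snow_dual`` extractor chains the
# two functions:
#
#   areas = region_surface_areas(regions)                    # voxel units
#   ia = region_interface_areas(regions, areas, voxel_size=1e-6)
#   # ia.conns[i] is a pair of region indices, ia.area[i] their shared area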
def mesh_surface_area(mesh=None, verts=None, faces=None):
r"""
Calculates the surface area of a meshed region
Parameters
----------
mesh : tuple
The tuple returned from the ``mesh_region`` function
verts : array
An N-by-ND array containing the coordinates of each mesh vertex
faces : array
An N-by-ND array indicating which elements in ``verts`` form a mesh
element.
Returns
-------
surface_area : float
The surface area of the mesh, calculated by
``skimage.measure.mesh_surface_area``
Notes
-----
This function simply calls ``skimage.measure.mesh_surface_area``, but
it allows for the passing of the ``mesh`` tuple returned by the
``mesh_region`` function, entirely for convenience.
"""
if mesh:
verts = mesh.verts
faces = mesh.faces
else:
if (verts is None) or (faces is None):
raise Exception('Either mesh or verts and faces must be given')
surface_area = measure.mesh_surface_area(verts, faces)
return surface_area
def phase_fraction(im, normed=True):
r"""
Calculates the number (or fraction) of each phase in an image
Parameters
----------
im : ND-array
An ND-array containing integer values
normed : Boolean
If ``True`` (default) the returned values are normalized by the total
number of voxels in image, otherwise the voxel count of each phase is
returned.
Returns
-------
result : 1D-array
An array of length ``amax(im) + 1`` with each element containing the number
of voxels found with the corresponding label.
See Also
--------
porosity
"""
if im.dtype == bool:
im = im.astype(int)
elif im.dtype != int:
raise Exception('Image must contain integer values for each phase')
labels = np.arange(0, np.amax(im)+1)
results = np.zeros_like(labels)
for i in labels:
results[i] = np.sum(im == i)
if normed:
results = results/im.size
return results
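if __name__ == '__main__':
    # Minimal self-check sketch (not part of the original source): exercise a
    # few of the metrics above on a random boolean image.  The printed numbers
    # depend on the random state and are purely illustrative.
    demo_im = np.random.rand(50, 50, 50) < 0.7
    print('porosity:', porosity(demo_im))
    print('phase fractions:', phase_fraction(demo_im))
    demo_tpc = two_point_correlation_fft(demo_im)
    print('first probabilities:', demo_tpc.probability[:5])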
```
#### File: porespy/networks/__snow_dual__.py
```python
import numpy as np
from porespy.networks import regions_to_network
from porespy.networks import add_boundary_regions
from porespy.networks import label_boundary_cells
from porespy.networks import _net_dict
from porespy.tools import pad_faces
from porespy.filters import snow_partitioning
from porespy.metrics import region_surface_areas, region_interface_areas
def snow_dual(im,
voxel_size=1,
boundary_faces=['top', 'bottom', 'left', 'right', 'front', 'back'],
marching_cubes_area=False):
r"""
Analyzes an image that has been partitioned into void and solid regions
and extracts the void and solid phase geometry as well as network
connectivity.
Parameters
----------
im : ND-array
Binary image in the Boolean form with True’s as void phase and False’s
as solid phase. It can process the inverted configuration of the
boolean image as well, but the output labelling of the phases will be
inverted and solid phase properties will be assigned to the void phase
labels, which will cause confusion while performing the simulation.
voxel_size : scalar
The resolution of the image, expressed as the length of one side of a
voxel, so the volume of a voxel would be **voxel_size**-cubed. The
default is 1, which is useful when overlaying the PNM on the original
image since the scale of the image is always 1 unit length per voxel.
boundary_faces : list of strings
Boundary faces labels are provided to assign hypothetical boundary
nodes having zero resistance to transport process. For cubical
geometry, the user can choose ‘left’, ‘right’, ‘top’, ‘bottom’,
‘front’ and ‘back’ face labels to assign boundary nodes. If no label is
assigned then all six faces will be selected as boundary nodes
automatically which can be trimmed later on based on user requirements.
marching_cubes_area : bool
If ``True`` then the surface area and interfacial area between regions
will be calculated using the marching cube algorithm. This is a more accurate
representation of area in extracted network, but is quite slow, so
it is ``False`` by default. The default method simply counts voxels
so does not correctly account for the voxelated nature of the images.
Returns
-------
A dictionary containing all the void and solid phase size data, as well as
the network topological information. The dictionary names use the OpenPNM
convention (i.e. 'pore.coords', 'throat.conns') so it may be converted
directly to an OpenPNM network object using the ``update`` command.
* ``net``: A dictionary containing all the void and solid phase size data,
as well as the network topological information. The dictionary names
use the OpenPNM convention (i.e. 'pore.coords', 'throat.conns') so it
may be converted directly to an OpenPNM network object using the
``update`` command.
* ``im``: The binary image of the void space
* ``dt``: The combined distance transform of the image
* ``regions``: The void and solid space partitioned into pores and solids
phases using a marker based watershed with the peaks found by the
SNOW Algorithm.
References
----------
[1] <NAME>. "A versatile and efficient network extraction algorithm
using marker-based watershed segmentation". Phys. Rev. E 96, 023307 (2017)
[2] <NAME> et al. "Dual network extraction algorithm to investigate
multiple transport processes in porous materials: Image-based modeling
of pore and grain-scale processes". Computers and Chemical Engineering.
123(6), 64-77 (2019)
"""
# -------------------------------------------------------------------------
# SNOW void phase
pore_regions = snow_partitioning(im, return_all=True)
# SNOW solid phase
solid_regions = snow_partitioning(~im, return_all=True)
# -------------------------------------------------------------------------
# Combined Distance transform of two phases.
pore_dt = pore_regions.dt
solid_dt = solid_regions.dt
dt = pore_dt + solid_dt
# Calculates combined void and solid regions for dual network extraction
pore_regions = pore_regions.regions
solid_regions = solid_regions.regions
pore_region = pore_regions*im
solid_region = solid_regions*~im
solid_num = np.amax(pore_regions)
solid_region = solid_region + solid_num
solid_region = solid_region * ~im
regions = pore_region + solid_region
b_num = np.amax(regions)
# -------------------------------------------------------------------------
# Boundary Conditions
regions = add_boundary_regions(regions=regions, faces=boundary_faces)
# -------------------------------------------------------------------------
# Padding distance transform to extract geometrical properties
dt = pad_faces(im=dt, faces=boundary_faces)
# -------------------------------------------------------------------------
# Extract void,solid and throat information from image
net = regions_to_network(im=regions, dt=dt, voxel_size=voxel_size)
# -------------------------------------------------------------------------
# Extract marching cube surface area and interfacial area of regions
if marching_cubes_area:
areas = region_surface_areas(regions=regions)
interface_area = region_interface_areas(regions=regions, areas=areas,
voxel_size=voxel_size)
net['pore.surface_area'] = areas * voxel_size**2
net['throat.area'] = interface_area.area
# -------------------------------------------------------------------------
# Find void to void, void to solid and solid to solid throat conns
loc1 = net['throat.conns'][:, 0] < solid_num
loc2 = net['throat.conns'][:, 1] >= solid_num
loc3 = net['throat.conns'][:, 1] < b_num
pore_solid_labels = loc1 * loc2 * loc3
loc4 = net['throat.conns'][:, 0] >= solid_num
loc5 = net['throat.conns'][:, 0] < b_num
solid_solid_labels = loc4 * loc2 * loc5 * loc3
loc6 = net['throat.conns'][:, 1] < solid_num
pore_pore_labels = loc1 * loc6
loc7 = net['throat.conns'][:, 1] >= b_num
boundary_throat_labels = loc5 * loc7
solid_labels = ((net['pore.label'] > solid_num) * ~
(net['pore.label'] > b_num))
boundary_labels = net['pore.label'] > b_num
b_sa = np.zeros(len(boundary_labels[boundary_labels == 1.0]))
# -------------------------------------------------------------------------
# Calculates void interfacial area that connects with solid and vice versa
p_conns = net['throat.conns'][:, 0][pore_solid_labels]
ps = net['throat.area'][pore_solid_labels]
p_sa = np.bincount(p_conns, ps)
s_conns = net['throat.conns'][:, 1][pore_solid_labels]
s_pa = np.bincount(s_conns, ps)
s_pa = np.trim_zeros(s_pa) # remove pore surface area labels
p_solid_surf = np.concatenate((p_sa, s_pa, b_sa))
# -------------------------------------------------------------------------
# Calculates interfacial area using marching cube method
if marching_cubes_area:
ps_c = net['throat.area'][pore_solid_labels]
p_sa_c = np.bincount(p_conns, ps_c)
s_pa_c = np.bincount(s_conns, ps_c)
s_pa_c = np.trim_zeros(s_pa_c) # remove pore surface area labels
p_solid_surf = np.concatenate((p_sa_c, s_pa_c, b_sa))
# -------------------------------------------------------------------------
# Adding additional information of dual network
net['pore.solid_void_area'] = (p_solid_surf * voxel_size**2)
net['throat.void'] = pore_pore_labels
net['throat.interconnect'] = pore_solid_labels
net['throat.solid'] = solid_solid_labels
net['throat.boundary'] = boundary_throat_labels
net['pore.void'] = net['pore.label'] <= solid_num
net['pore.solid'] = solid_labels
net['pore.boundary'] = boundary_labels
# -------------------------------------------------------------------------
# label boundary cells
net = label_boundary_cells(network=net, boundary_faces=boundary_faces)
# -------------------------------------------------------------------------
# assign out values to dummy dict
temp = _net_dict(net)
temp.im = im.copy()
temp.dt = dt
temp.regions = regions
return temp
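# --- Hypothetical usage sketch (not part of the original source) ---
# Given a boolean image ``im`` of the pore space (e.g. from
# porespy.generators.blobs), a dual network could be extracted roughly as:
#
#   out = snow_dual(im, voxel_size=1e-6, marching_cubes_area=False)
#   # ``out`` bundles the OpenPNM-style network dictionary (keys such as
#   # 'pore.coords', 'throat.conns', 'pore.void') with the attributes
#   # ``out.im``, ``out.dt`` and ``out.regions`` attached above.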
```
#### File: test/unit/test_generators.py
```python
import porespy as ps
import numpy as np
import scipy as sp
import scipy.stats
import pytest
import scipy.ndimage as spim
import matplotlib.pyplot as plt
plt.close('all')
class GeneratorTest():
def setup_class(self):
np.random.seed(10)
def test_cylinders(self):
X = 100
Y = 100
# Fibers don't work in 2D
with pytest.raises(Exception):
im = ps.generators.cylinders(shape=[X, Y], radius=4, ncylinders=20)
# But this works
im = ps.generators.cylinders(shape=[1, X, Y], radius=1, ncylinders=20)
assert im.dtype == bool
assert np.shape(im.squeeze()) == (X, Y)
im = ps.generators.cylinders(shape=[50, 50, 50], radius=1,
ncylinders=20)
assert np.shape(im.squeeze()) == (50, 50, 50)
def test_insert_shape_center_defaults(self):
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, center=[5, 5])
assert np.sum(im) == np.prod(shape.shape)
im = np.zeros([11, 11])
shape = np.ones([4, 4])
with pytest.raises(Exception):
im = ps.generators.insert_shape(im, element=shape, center=[5, 5])
def test_insert_shape_center_overlay(self):
im = np.ones([10, 10])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, center=[5, 5],
value=1.0, mode='overlay')
assert np.sum(im) == (np.prod(im.shape) + np.prod(shape.shape))
def test_insert_shape_corner_overwrite(self):
im = np.ones([10, 10])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, corner=[5, 5],
value=1.0, mode='overlay')
assert np.sum(im) == (np.prod(im.shape) + np.prod(shape.shape))
assert im[5, 5] == 2
assert im[4, 5] == 1 and im[5, 4] == 1
def test_insert_shape_center_outside_im(self):
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, center=[-1, -1])
assert np.sum(im) == 1
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, center=[0, -1])
assert np.sum(im) == 2
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, center=[10, 10])
assert np.sum(im) == 4
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, center=[14, 14])
assert np.sum(im) == 0
im = np.zeros([11, 11])
shape = np.ones([4, 4])
with pytest.raises(Exception):
im = ps.generators.insert_shape(im, element=shape, center=[10, 10])
im = np.zeros([11, 11])
shape = np.ones([4, 3])
with pytest.raises(Exception):
im = ps.generators.insert_shape(im, element=shape, center=[10, 10])
def test_insert_shape_corner_outside_im(self):
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, corner=[-1, -1])
assert np.sum(im) == 4
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, corner=[-1, 1])
assert np.sum(im) == 6
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, corner=[-3, -3])
assert np.sum(im) == 0
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, corner=[10, 9])
assert np.sum(im) == 2
im = np.zeros([11, 11])
shape = np.ones([3, 3])
im = ps.generators.insert_shape(im, element=shape, corner=[13, 13])
assert np.sum(im) == 0
im = np.zeros([11, 11])
shape = np.ones([3, 4])
im = ps.generators.insert_shape(im, element=shape, corner=[9, 9])
assert np.sum(im) == 4
im = np.zeros([11, 11])
shape = np.ones([3, 4])
im = ps.generators.insert_shape(im, element=shape, corner=[0, -1])
assert np.sum(im) == 9
def test_bundle_of_tubes(self):
im = ps.generators.bundle_of_tubes(shape=[101, 101, 1], spacing=10)
labels, N = spim.label(input=im)
assert N == 100
def test_overlapping_spheres_2d(self):
phis = np.arange(0.1, 0.9, 0.2)
for phi in phis:
im = ps.generators.overlapping_spheres(shape=[101, 101],
radius=5,
porosity=phi)
phi_actual = im.sum() / np.size(im)
assert abs(phi_actual - phi) < 0.02
def test_overlapping_spheres_3d(self):
phis = np.arange(0.1, 0.9, 0.2)
for phi in phis:
im = ps.generators.overlapping_spheres(shape=[100, 100, 50],
radius=8, porosity=phi)
phi_actual = im.sum() / np.size(im)
assert abs(phi_actual - phi) < 0.02
def test_polydisperse_spheres(self):
phis = np.arange(0.1, 0.9, 0.2)
dist = sp.stats.norm(loc=7, scale=2)
for phi in phis:
im = ps.generators.polydisperse_spheres(shape=[100, 100, 50],
porosity=phi, dist=dist,
nbins=10)
phi_actual = im.sum() / np.size(im)
assert abs(phi_actual - phi) < 0.1
def test_voronoi_edges(self):
np.random.seed(0)
im = ps.generators.voronoi_edges(shape=[50, 50, 50],
radius=2,
ncells=25,
flat_faces=True)
top_slice = im[:, :, 0]
assert np.sum(top_slice) == 1409
def test_lattice_spheres_square(self):
im = ps.generators.lattice_spheres(shape=[101, 101], radius=5,
offset=0, lattice='sc')
labels, N = spim.label(input=~im)
assert N == 100
def test_lattice_spheres_triangular(self):
im = ps.generators.lattice_spheres(shape=[101, 101], radius=5,
lattice='triangular')
labels, N = spim.label(input=~im)
assert N == 85
def test_lattice_spheres_sc(self):
im = ps.generators.lattice_spheres(shape=[101, 101, 101],
radius=4, offset=1,
lattice='sc')
labels, N = spim.label(input=~im)
assert N == 1000
def test_lattice_spheres_fcc(self):
im = ps.generators.lattice_spheres(shape=[101, 101, 101],
radius=4, offset=2,
lattice='fcc')
labels, N = spim.label(input=~im)
assert N == 392
def test_lattice_spheres_bcc(self):
im = ps.generators.lattice_spheres(shape=[101, 101, 101],
radius=4, offset=2,
lattice='bcc')
labels, N = spim.label(input=~im)
assert N == 1024
def test_noise_simplex(self):
pass
def test_noise_perlin(self):
pass
def test_blobs_1d_shape(self):
im = ps.generators.blobs(shape=[101])
assert len(list(im.shape)) == 3
def test_RSA_2d_single(self):
np.random.seed(0)
im = np.zeros([100, 100], dtype=int)
im = ps.generators.RSA(im, radius=10, volume_fraction=0.5)
assert np.sum(im > 0) == 5095
assert np.sum(im > 1) == 20
def test_RSA_2d_multi(self):
np.random.seed(0)
im = np.zeros([100, 100], dtype=int)
im = ps.generators.RSA(im, radius=10, volume_fraction=0.5)
im = ps.generators.RSA(im, radius=5, volume_fraction=0.75)
assert np.sum(im > 0) == 6520
assert np.sum(im > 1) == 44
def test_RSA_3d_single(self):
np.random.seed(0)
im = np.zeros([50, 50, 50], dtype=int)
im = ps.generators.RSA(im, radius=5, volume_fraction=0.5)
assert np.sum(im > 0) == 45602
assert np.sum(im > 1) == 121
def test_RSA_mask_edge_2d(self):
im = np.zeros([100, 100], dtype=int)
im = ps.generators.RSA(im, radius=10, volume_fraction=0.5,
mode='contained')
coords = np.argwhere(im == 2)
assert ~np.any(coords < 10)
assert ~np.any(coords > 90)
def test_RSA_mask_edge_3d(self):
im = np.zeros([50, 50, 50], dtype=int)
im = ps.generators.RSA(im, radius=5, volume_fraction=0.5,
mode='contained')
coords = np.argwhere(im == 2)
assert ~np.any(coords < 5)
assert ~np.any(coords > 45)
def test_line_segment(self):
X0 = [3, 4]
X1 = [5, 9]
L1, L2 = ps.generators.line_segment(X0, X1)
assert np.all(L1 == [3, 3, 4, 4, 5, 5])
assert np.all(L2 == [4, 5, 6, 7, 8, 9])
X0 = [3, 4, 5]
X1 = [5, 9, 13]
L1, L2, L3 = ps.generators.line_segment(X0, X1)
assert np.all(L1 == [3, 3, 4, 4, 4, 4, 4, 5, 5])
assert np.all(L2 == [4, 5, 5, 6, 6, 7, 8, 8, 9])
assert np.all(L3 == [5, 6, 7, 8, 9, 10, 11, 12, 13])
if __name__ == '__main__':
t = GeneratorTest()
self = t
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
```
#### File: test/unit/test_visualization.py
```python
import porespy as ps
import numpy as np
class VisualizationTest():
def setup_class(self):
self.im = ps.generators.blobs(shape=[51, 51, 51])
def test_sem_x(self):
sem = ps.visualization.sem(self.im)
assert sem.ndim == 2
def test_xray_x(self):
xray = ps.visualization.xray(self.im)
assert np.sum(xray) == np.sum(~self.im)
def test_sem_y(self):
sem = ps.visualization.sem(self.im, direction='Y')
assert sem.ndim == 2
def test_xray_y(self):
xray = ps.visualization.xray(self.im, direction='Y')
assert np.sum(xray) == np.sum(~self.im)
def test_sem_z(self):
sem = ps.visualization.sem(self.im, direction='Z')
assert sem.ndim == 2
def test_xray_z(self):
xray = ps.visualization.xray(self.im, direction='Z')
assert np.sum(xray) == np.sum(~self.im)
if __name__ == '__main__':
t = VisualizationTest()
t.setup_class()
t.test_sem_x()
t.test_xray_x()
t.test_sem_y()
t.test_xray_y()
t.test_sem_z()
t.test_xray_z()
``` |
{
"source": "1MLightyears/AuraProII",
"score": 2
} |
#### File: AuraProII/src/Search.py
```python
import requests as rq
from time import sleep
from json import loads, dump
from PyQt5.QtCore import QMutex
from Base import settings, getNamebyID, TMsgEntry, Valuable, history, RGB2Hex, MDStyleStr, SaveFile, log
MutSearchName = QMutex()
MutSearchKM = QMutex()
def SearchName(name: str = '',ID:int=-1,is_strict=False):
"""
Fetch the character ID and the zKillboard statistics.
name(str): character name
ID(int): if the character ID is already known, skip the characterID lookup
is_strict(bool): strict mode, only search for names that exactly match name
"""
global MutSearchName
MutSearchName.lock()
Msg = {}
log("SearchName:name={name},ID={ID},is_strict={is_strict}".format(name=name,ID=ID,is_strict=is_strict))
#characterID
if ID < 0: # if no characterID was given
name = name.strip(" \n")
for i in history:
if i.upper() == name.upper():
name = i
ID = history[name]["characterID"]
break
if ID < 0: # the history lookup missed
log("SearchName:历史记录未命中")
strict = "&strict=true" if is_strict else "&strict=false"
url=r"https://esi.evetech.net/latest/search/?categories=character&datasource=tranquility&language=en-us&search="+name.replace(" ", "+")+strict
try:
with rq.get(url,timeout=5) as ret:
ret = loads(ret.content)
except Exception as e:
log("SearchName:"+str(e),level="error")
Msg.update({"Error":"esiError"})
MutSearchName.unlock()
return Msg
if "character" in ret:
ID = ret["character"]
if (len(ID) > 1):
log("SearchName:命中{l}/{lmax}个结果".format(l=len(ID),lmax=settings["ResultCountLimit"]))
# multiple results were matched
if len(ID) < settings["ResultCountLimit"]:
Msg.update({"NameList":ID})
MutSearchName.unlock()
return Msg
else:
# too many results
Msg.update({
"TooManyResults": TMsgEntry("搜索结果数量超过" + \
str(settings["ResultCountLimit"]) + "个,改为严格模式...",
style_str=MDStyleStr(
color=settings["clHint"],
font_size=settings["labelFontSize"]
)),
"name": name
})
MutSearchName.unlock()
return Msg
else:
ID=ID[0]
else:
Msg.update({"Error": "NoSuchCharacterError"})
log("SearchName:esi查询失败:{name}".format(name=name),level="warning")
MutSearchName.unlock()
return Msg
Msg.update({"SearchName": [name, ID]})
#zkb
url = r"https://zkillboard.com/api/stats/characterID/" + str(ID) + r"/"
try:
with rq.get(url,timeout=5) as ret:
ret = loads(ret.content)
except Exception as e:
Msg.update({"Error":"zkbError"})
MutSearchName.unlock()
return Msg
# the character info is None
if (ret == {}) or (ret["info"] == None):
Msg.update({"Error":"PlayerNoPVPData"})
MutSearchName.unlock()
return Msg
if "dangerRatio" not in ret:
ret["dangerRatio"]=0
danger_ratio_color = (int(ret["dangerRatio"] / 100 * 255),
int((1 - (ret["dangerRatio"] / 100)) * 255),0)
if "gangRatio" not in ret:
ret["gangRatio"]=100
solo_ratio_color = (int((1 - (ret["gangRatio"] / 100)) * 255),
int(ret["gangRatio"] / 100 * 255),0)
Msg.update({"SearchKB": {
"name": [ret["info"]["name"], TMsgEntry(r"<a href='https://zkillboard.com/character/" + str(ID) + r"' style='color:blue'>" + ret["info"]["name"] + "</a>",
style_str=MDStyleStr(color=settings["clURL"],font_size=settings["labelFontSize"]))],
"dangerRatio":[ret["dangerRatio"],TMsgEntry("危险度:" + str(ret["dangerRatio"]) + "%",
style_str=MDStyleStr(color=danger_ratio_color,font_size=settings["labelFontSize"]))],
"soloRatio": [ret["gangRatio"],TMsgEntry("solo率:" + str(100-ret["gangRatio"]) + "%",
style_str=MDStyleStr(color=solo_ratio_color,font_size=settings["labelFontSize"]))],
"topShips": [[i["shipTypeID"] for i in ret["topLists"][3]["values"][:3]],TMsgEntry("最高击杀舰船:" + ','.join([
getNamebyID(i["shipTypeID"])+ "(" + str(i["kills"]) + "次)"
for i in ret["topLists"][3]["values"][:3]
if i["shipName"] != "Capsule"
]),
style_str=MDStyleStr(color=settings["cltopShips"],font_size=settings["labelFontSize"]))],
"topSolarSystem":[[i["solarSystemName"] for i in ret["topLists"][4]["values"][:3]],TMsgEntry("最常出没:"+','.join([
i["solarSystemName"] + "(" + str(i["kills"]) + "次)"
for i in ret["topLists"][4]["values"][:3]
]),
style_str=MDStyleStr(color=settings["cltopSolarSystem"],font_size=settings["labelFontSize"]))]}})
history.update({ret["info"]["name"]: {"characterID": ID}})
SaveFile(history,settings["workingDir"]+"history.json")
#getKMList
url = r"https://zkillboard.com/api/kills/characterID/" + str(ID) + r"/"
try:
with rq.get(url,timeout=5) as ret:
ret = loads(ret.content)
except Exception as e:
log("getKMList:"+str(e),level="error")
Msg.update({"Error":"getKMListError"})
MutSearchName.unlock()
return Msg
# the player may have no kill killmails
if ret == []:
Msg.update({"getKMList": []})
MutSearchName.unlock()
return Msg
km_count = min(settings["KMCounts"],len(ret))
killmail_pairs = [(ret[i]["killmail_id"], ret[i]["zkb"]["hash"]) for i in range(km_count)]
remap = []
label_list = [TMsgEntry(str(i+1)+".最近km("+Valuable(ret[i]["zkb"]["totalValue"])+' isk)',
style_str=MDStyleStr(color=settings["clKM"],font_size=settings["labelFontSize"]),
ClickEvent=SearchKM,
ClickArgs=(ID, killmail_pairs[i][0], killmail_pairs[i][1])) for i in range(len(killmail_pairs))]
for i in killmail_pairs:
remap.append([i, label_list[0], {}])
label_list = label_list[1:]
Msg.update({"getKMList": remap})
log("getKMList:完成")
MutSearchName.unlock()
return Msg
def SearchKM(character_id:int,killmail_id: int = 0, killmail_hash: str = ''):
"""
Fetch one specific killmail.
character_id(int): character id
killmail_id(int): id of the killmail
killmail_hash(str): killmail hash provided by CCP
"""
global MutSearchKM
MutSearchKM.lock()
Msg = {}
log("SearchKM:character_id={character_id},killmail_id={killmail_id},killmail_hash={killmail_hash}".format(character_id=character_id,killmail_id=killmail_id,killmail_hash=killmail_hash))
url = r"https://esi.evetech.net/latest/killmails/"\
+ str(killmail_id)\
+ r"/" + killmail_hash + r"/?datasource=tranquility"
try:
with rq.get(url,timeout=5) as ret:
ret = loads(ret.content)
except Exception as e:
log("SearchKM:"+str(e),level="error")
Msg.update({"Error":"SearchKMError"})
MutSearchKM.unlock()
return -1
Msg = {"SearchKM": {"time": [ret["killmail_time"].replace('T', ' ').replace('Z', ''),TMsgEntry(" ("+ret["killmail_time"].replace('T', ' ').replace('Z', ''+")"),
style_str=MDStyleStr(color=settings["clHint"],font_size=settings["labelFontSize"]))
],
"victimShip": [ret["victim"]["ship_type_id"],TMsgEntry(
r" <a href='https://zkillboard.com/kill/"+str(killmail_id)+r"/' style='color:blue'>击毁:"+getNamebyID(ret["victim"]["ship_type_id"])+r"</a>",
style_str=MDStyleStr(color=settings["clURL"],font_size=settings["labelFontSize"]))
]
}}
for i in ret["attackers"]:
if ("character_id"in i)and(i["character_id"] == character_id):
ship=getNamebyID(i["ship_type_id"]) if "ship_type_id" in i else "(?)"
weapon = getNamebyID(i["weapon_type_id"]) if "weapon_type_id" in i else "(?)"
if ship == weapon:
weapon="(混合)"
Msg["SearchKM"].update({
"shipType":[ship,TMsgEntry(
" · " + ship,
style_str=MDStyleStr(color=settings["clshipType"],font_size=settings["labelFontSize"])
)],
"weaponType":[weapon,TMsgEntry(
" · " + weapon,
style_str=MDStyleStr(color=settings["clweaponType"],font_size=settings["labelFontSize"])
)]})
break
log("SearchKM:完成")
MutSearchKM.unlock()
return Msg
def addName(ID: int,no:int=0):
"""
Fetch the name for a characterID.
ID(int): the ID whose name should be looked up.
no(int): the index of the search result.
"""
url=r"https://esi.evetech.net/latest/characters/"+str(ID)+r"/?datasource=tranquility"
Msg = {"addName": []}
try:
with rq.get(url,timeout=5) as ret:
ret = loads(ret.content)
except Exception as e:
log("addName:" + str(e), level="error")
return Msg
log("addName:ID={ID}已获取到".format(ID=ID))
if "name" in ret:
history.update({ret["name"]:{"characterID":ID}})
Msg["addName"].append([ID,
TMsgEntry(ret["name"],
ClickEvent=SearchName,
ClickArgs=(ret["name"],ID),
style_str=MDStyleStr(color=settings["claddName"],font_size=settings["labelFontSize"]))]
)
return Msg
``` |
{
"source": "1MLightyears/baike",
"score": 3
} |
#### File: baike/baike/baike.py
```python
import re
from sys import stderr
from typing import List
from os.path import exists
import time
import requests as rq
from lxml import html
class Baike:
"""
The main object. Performs encyclopedia (Baidu Baike) searches.
"""
# private
def __init__(self, *args, **kwargs):
self.reset()
self.setting(*args, **kwargs)
def __regularize(self, i: int, j: int):
"""
Clamp i into the range -j to j-1 so that it can be used as a list index.
"""
if i >= j:
i = j - 1
elif i < -j:
i = -j
return i
def __getTitles(self, doc):
"""
Fetch the main title and subtitle.
doc(lxml.html): the page to extract the titles from
"""
self.title = doc.xpath(
"//dd[@class='lemmaWgt-lemmaTitle-title J-lemma-title']/h1/text()"
)[0]
self.subtitle = doc.xpath(
"//dd[@class='lemmaWgt-lemmaTitle-title J-lemma-title']/h2/text()"
)
if self.subtitle != []:
self.subtitle = self.subtitle[0]
else:
self.subtitle = ""
def __getSummaryPic(self, url: str):
"""
Save the summary picture of an entry.
"""
# first get the url without the extra query parameters
url = re.search(r"^(.*?)\?", url).group(1)
try:
ir = rq.get(url, stream=True)
pic_path = f"{self.__setup['keyword']}_{str(self.__setup['no'][0])}_{time.strftime('%Y%m%d%H%M%S',time.localtime())}.jpg"
if not exists(pic_path):
if ir.status_code == 200:
with open(pic_path, "wb") as f:  # the picture is assumed to be jpg
for chunk in ir:
f.write(chunk)
return True  # return True when the picture was saved successfully
except rq.exceptions.Timeout:
stderr.write("超时错误:" + url + ";" + "HTTP状态码:" + str(ir.status_code) + "\n")
return False  # return False when saving the picture failed
def __getParagraph(self, url: str):
"""
Fetch the summary/content of an entry.
"""
endl = "ANewLine"
try:
ret = rq.get(url, headers=self.__header, timeout=self.__setup["timeout"])
except rq.exceptions.Timeout:
stderr.write("超时错误:" + url + ";" + "HTTP状态码:" + str(ret.status_code) + "\n")
return ""
doc = html.fromstring(ret.text)
# a new page was loaded, so fetch the title and subtitle again
self.__getTitles(doc)
# fetch the summary picture
if self.__setup["pic"]:
img = doc.xpath("//div[@class='summary-pic']//img")
if img != []:
self.__getSummaryPic(img[0].attrib["src"])
self.text = ""
# if the second element of no is an empty list, show the table of contents
if self.__setup["no"][1] == []:
self.text = "【目录】" + endl + "0简介" + endl
index = doc.xpath("//dt[@class='catalog-title level1']")
for item in index:
self.text += item.text_content() + endl
# process the entry text and split it into sections
para_list = []
# some pages with a header poster keep the summary separately, so it has to be selected on its own
post_title = doc.xpath("//body/div[3]")
if post_title != []:
if post_title[0].attrib["class"] != "body-wrapper":
para_list.append(
post_title[0]
.xpath(".//div[@class='lemma-summary']")[0]
.text_content()
)
div_list = doc.xpath("//div[@class='content']/div/div")
# some pages are structured differently: e.g. some movie pages wrap the main content in an extra div
list_check = [
i
for i in div_list
if ("class" in i.attrib.keys()) and ("main_tab" in i.attrib["class"])
]
if list_check != []:
div_list = list_check[0].xpath("./div")
for div in div_list:
attrib_class = div.attrib.get("class", "")
if not attrib_class:
continue
elif "lemma-summary" in attrib_class:
# this is the summary part
para_list.append(div.text_content())
elif "level-2" in attrib_class:
# the heading of a section
para_list.append(f"{len(para_list)}.")
for t in div.getchildren()[0].itertext():
# the section heading is an <h2>; underneath it there is a <span> whose text
# equals the page title and would pollute the extracted heading, so that
# <span> is filtered out. Build a heading that reads well.
if t != self.title:
para_list[len(para_list) - 1] += t + " "
elif ("para" in attrib_class) and ("style" not in attrib_class):
# body text of the previous section (and not a picture); append it to the last list item
para_list[len(para_list) - 1] += endl + div.text_content()
elif "album-list" in attrib_class:
# end of the content
break
# pick the requested sections
for i in self.__setup["no"][1]:
self.text += para_list[self.__regularize(i, len(para_list))] + endl
# post-process the description
# strip \xa0 characters
self.text = re.sub(r"\xa0", "", self.text)
# strip the reference superscripts
self.text = re.sub(r"\[[0-9\-]*?\]", "", self.text)
# strip newline characters
self.text = re.sub(r"[\n\r]", "", self.text)
# turn the ANewLine markers into \n
self.text = re.sub(endl, "\n", self.text)
# processing done, assemble the result
return self.title + self.subtitle + "\n" + self.text
def __getEntries(self, url: str):
"""
Fetch the list of entries (word senses).
"""
try:
ret = rq.get(url, headers=self.__header, timeout=self.__setup["timeout"])
except rq.exceptions.Timeout:
stderr.write("超时错误:" + url + ";" + "HTTP状态码:" + str(ret.status_code) + "\n")
return ""
doc = html.fromstring(ret.text)
# we are now on the page of the first entry
self.__getTitles(doc)
# fetch the entry list
self.entrylist = doc.xpath(
"//ul[@class='polysemantList-wrapper cmn-clearfix']//li/*"
)
# if the entry list is empty this is a single-sense word, so give it a title
if self.entrylist == []:
self.entrylist = [html.HtmlElement()]
self.entrylist[0].text = self.title + "\n" + self.subtitle
# prepend a dummy entry 0 to entrylist so that index i points to the i-th entry
self.entrylist = [html.HtmlElement()] + self.entrylist
# prepend the host to the other urls so that correct absolute urls are returned
for i in range(len(self.entrylist)):
if self.entrylist[i].attrib.has_key("href"):
self.entrylist[i].attrib["href"] = (
"https://baike.baidu.com" + self.entrylist[i].attrib["href"]
)
else:
# the entry without an href attribute is the current one, so give it a url
self.entrylist[i].attrib["href"] = url
# normalize no
# if no is a single integer, turn it into a list
if isinstance(self.__setup["no"], int):
self.__setup["no"] = [self.__setup["no"], [0]]
# fetch the content of entry no[0]
if self.__setup["no"][0] != 0:
return self.__getParagraph(
self.entrylist[
self.__regularize(self.__setup["no"][0], len(self.entrylist))
].attrib["href"]
)
elif self.__setup["no"][0] == 0:
# if no[0] is 0 the entry list itself was requested
entries = ""
self.title = self.__setup["keyword"]
for i in range(1, len(self.entrylist)):
entries += str(i) + ":" + self.entrylist[i].text + "\n"
# processing done, assemble the result
return self.title + "\n" + entries
def __call__(self, *args, **kwargs):
ret = self.setting(*args, **kwargs)
if ret == 0:
return self.query()
else:
return ""
# public
def query(self):
"""
Search for the keyword.
"""
if self.setting() != 0:
return ""
# fetch the search results
try:
ret = rq.get(
"https://baike.baidu.com/search?word=" + self.__setup["keyword"],
headers=self.__header,
timeout=self.__setup["timeout"],
)
except rq.exceptions.Timeout:
stderr.write(
"超时错误:"
+ "https://baike.baidu.com/search?word="
+ self.__setup["keyword"]
+ ";"
+ "HTTP状态码:"
+ str(ret.status_code)
+ "\n"
)
return ""
ret.encoding = "utf-8"
doc = html.fromstring(ret.text)
x = "//div[@class='searchResult']/dl[1]/dd[1]/a[@class='result-title']"
ans = doc.xpath(x)
if ans == []:
stderr.write("没有匹配的搜索结果:" + self.__setup["keyword"] + "\n")
return ""
url = ans[0].attrib["href"]
if url[0] == "/":
url = "https://baike.baidu.com" + url
return self.__getEntries(url)
def setting(self, *args, **kwargs):
"""
Configure the search keyword and the request header.
keyword(str): the keyword to search for. Defaults to None, in which case an empty string is returned.
no(List of int): first element: integer no1.
If no1 is an integer, fetch the no1-th entry;
if no1 is 0, fetch the list of entries;
a negative no1 counts backwards from the last entry.
Defaults to 1.
Second element: list no2.
If no2 is empty ("[]"), fetch the table of contents;
if it contains 0, fetch the summary paragraph;
if it contains integers, fetch those paragraphs in order and join them.
Defaults to [0].
timeout(int): request timeout; on timeout an error is written to stderr and an empty string is returned. Defaults to 5 (seconds).
pic(bool): whether to download the summary picture. Defaults to False.
If the settings are valid this function returns 0; otherwise it returns a value greater than 0, and calling query() would then fail.
"""
# user-provided settings
for i, j in zip(self.__setup.keys(), args):
self.__setup[i] = j
self.__setup.update(kwargs)
# check that the parameters are valid
# keyword
if not isinstance(self.__setup["keyword"], str):
stderr.write("参数不正确:keyword必须是字符串\n")
return 1
# no
if not (
(
isinstance(self.__setup["no"], int)
or (
isinstance(self.__setup["no"], List)
and (len(self.__setup["no"]) == 2)
and (isinstance(self.__setup["no"][0], int))
and (isinstance(self.__setup["no"][1], List))
and (
[i for i in self.__setup["no"][1] if not isinstance(i, int)]
== []
)
)
)
):
stderr.write("参数不正确:no必须是整数或描述段落的列表\n")
return 2
# timeout
if self.__setup["timeout"] <= 0:
stderr.write("参数不正确:timeout必须大于0\n")
return 3
# pic
if not isinstance(self.__setup["pic"], bool):
stderr.write("参数不正确:pic必须是True或False\n")
return 4
# automatically derived settings
# header
# use a Firefox-like header for now
self.__header = {
"Host": "baike.baidu.com",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0",
"Accept": "application/json, text/javascript, */*; q=0.01",
"Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
"Accept-Encoding": "gzip, deflate",
}
self.title = ""
self.subtitle = ""
self.text = ""
return 0
def reset(self):
self.__setup = {"keyword": "", "no": [1, [0]], "timeout": 5, "pic": False}
# provide a predefined helper object, getBaike, for convenient direct calls
def getBaike(*args, **kwargs):
return Baike()(*args, **kwargs)
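# --- Hypothetical usage sketch (not part of the original source) ---
# getBaike performs live HTTP requests to baike.baidu.com, so the output
# depends on the site at the time of the call:
#
#   print(getBaike("Python"))              # summary of the first entry
#   print(getBaike("Python", no=0))        # list all entries for the keyword
#   print(getBaike("Python", no=[1, []]))  # table of contents of entry 1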
``` |
{
"source": "1MLightyears/clarisse",
"score": 2
} |
#### File: clarisse/clarisse/types_supported.py
```python
from PySide2.QtWidgets import (
QLabel,
QSpinBox,
QLineEdit,
QDoubleSpinBox,
QTextEdit,
QTableWidget,
QCheckBox,
QTableWidgetItem,
)
from PySide2.QtCore import Qt
import typing
from . import log
__all__ = [
"ClrsUnknown",
"ClrsInt",
"ClrsFloat",
"ClrsString",
"ClrsList",
"ClrsBool",
"ClrsDict",
]
class ClrsUnknown(QLineEdit):
__name__ = "ClrsUnknown"
def __init__(self, name: str = "", target_kwargs: dict = {}, *args, **kwargs):
super(ClrsUnknown, self).__init__(*args, **kwargs)
if name != "":
self.setObjectName(name)
self.target_kwargs = target_kwargs
def setDefault(self, default=None):
if default != None:
self.setPlaceholderText(str(default))
def getValue(self):
return self.text() or self.placeholderText()
class ClrsString(QLineEdit):
__name__ = "ClrsString"
def __init__(self, name: str = "", target_kwargs: dict = {}, *args, **kwargs):
super(ClrsString, self).__init__(*args, **kwargs)
if name != "":
self.setObjectName(name)
self.target_kwargs = target_kwargs
def setDefault(self, default=None):
if default != None:
self.setPlaceholderText(default)
def getValue(self):
return self.text() or self.placeholderText()
class ClrsInt(QSpinBox):
__name__ = "ClrsInt"
def __init__(self, name: str = "", target_kwargs: dict = {}, *args, **kwargs):
super(ClrsInt, self).__init__(*args, **kwargs)
if name != "":
self.setObjectName(name)
self.target_kwargs = target_kwargs
def setDefault(self, default=None):
if default != None:
self.setValue(default)
def getValue(self):
return self.value()
class ClrsFloat(QDoubleSpinBox):
__name__ = "ClrsFloat"
def __init__(self, name: str = "", target_kwargs: dict = {}, *args, **kwargs):
super(ClrsFloat, self).__init__(*args, **kwargs)
if name != "":
self.setObjectName(name)
self.target_kwargs = target_kwargs
def setDefault(self, default=None):
if default != None:
self.setValue(default)
def getValue(self):
return self.value()
class ClrsList(QTextEdit):
__name__ = "ClrsList"
def __init__(
self,
name: str = "",
target_kwargs: dict = {},
ElementType: type = None,
*args,
**kwargs
):
super(ClrsList, self).__init__(*args, **kwargs)
if name != "":
self.setObjectName(name)
self.target_kwargs = target_kwargs
self.ElementType = ElementType
def setDefault(self, default=None):
if isinstance(default, list):
self.setPlaceholderText("\n".join([str(i) for i in default]))
def getValue(self):
t = self.placeholderText() if self.toPlainText() == "" else self.toPlainText()
if (isinstance(self.ElementType, tuple)) and (len(self.ElementType) > 0):
return [self.ElementType(i) for i in t.split("\n")]
return [i for i in t.split("\n")]
class ClrsBool(QCheckBox):
__name__ = "ClrsBool"
def __init__(self, name: str = "", target_kwargs: dict = {}, *args, **kwargs):
super(ClrsBool, self).__init__(*args, **kwargs)
if name != "":
self.setObjectName(name)
self.target_kwargs = target_kwargs
def setDefault(self, default=None):
if default != None:
self.setChecked(default)
def getValue(self):
return self.isChecked()
class ClrsDict(QTableWidget):
__name__ = "ClrsDict"
def __init__(
self,
name: str = "",
target_kwargs: dict = {},
ElementType=None,
*args,
**kwargs
):
super(ClrsDict, self).__init__(*args, **kwargs)
if name != "":
self.setObjectName(name)
self.target_kwargs = target_kwargs
self.setDefaultDropAction(Qt.TargetMoveAction)
self.ElementType = ElementType
def setDefault(self, default=None):
if default != None:
# create a 2 column table
keys = list(default.keys())
keys_count = len(keys)
self.setRowCount(keys_count)
self.setColumnCount(2)
for i in range(keys_count):
k, v = QTableWidgetItem(), QTableWidgetItem()
k.setText(str(keys[i]))
v.setText(str(default[keys[i]]))
self.setItem(i, 0, k)
self.setItem(i, 1, v)
# TODO:add approaches to add/delete items
def getValue(self):
ret = {}
for i in range(self.rowCount()):
k, v = self.item(i, 0).text(), self.item(i, 1).text()
if isinstance(self.ElementType, tuple):
if len(self.ElementType) > 0:
k = self.ElementType[0](k)
if len(self.ElementType) > 1:
v = self.ElementType[1](v)
ret.update({k: v})
return ret
``` |
{
"source": "1MLightyears/easylog",
"score": 3
} |
#### File: easylog/easylog/__init__.py
```python
from .logger import Logger, loggers, Format, brackets
from .trigger import error_loggers
import sys
from functools import partial
__all__ = [
"default_logger", "log", "print", "error_loggers", "Logger", "brackets",
"info", "warning", "error", "fatal", "debug"
]
default_logger = Logger()
def log(*args, sep=" ", level="info", dest=sys.stderr):
"""
Write the log message.
In fact, this function broadcasts a log request to all loggers to see if any
logger answers the request.
"""
for logger in loggers:
logger.log(sep.join([str(i) for i in args]), level, dest)
def print(*args, **kwargs):
"""
'print' the log message to designated dest and level for once.
The default logger(i.e. default_logger) will always answer, while
other loggers answer when their dest & level match the designated dest &
level.
"""
global default_logger
sep = kwargs.get("sep", " ")
l = default_logger.level
d = default_logger.dest
default_logger.level = kwargs.get("level", "info")
default_logger.dest = kwargs.get("dest", sys.stderr)
# default_logger would respond in any cases,
# other loggers would respond if their conditions met.
log(*args, sep=sep, level=default_logger.level, dest=default_logger.dest)
default_logger.level = l
default_logger.dest = d
# for legacy use
info = partial(log, level="info")
warning = partial(log, level="warning")
error = partial(log, level="error")
fatal = partial(log, level="fatal")
debug = partial(log, level="debug")
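# --- Hypothetical usage sketch (not part of the original source) ---
# From client code the package would typically be used as:
#
#   import sys
#   import easylog
#   easylog.log("service started", level="info")                 # broadcast to loggers
#   easylog.warning("disk almost full")                          # legacy-style helper
#   easylog.print("debug dump", level="debug", dest=sys.stdout)  # one-off print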
``` |
{
"source": "1MLightyears/latexify_py",
"score": 2
} |
#### File: 1MLightyears/latexify_py/setup.py
```python
import setuptools
def main():
with open('README.md', 'r') as fp:
readme = fp.read()
setuptools.setup(
name='latexify-py',
version='0.0.7',
description='Generates LaTeX source from Python functions.',
long_description=readme,
        long_description_content_type='text/markdown',
url='https://github.com/google/latexify_py',
author='<NAME>',
author_email='<EMAIL>',
license='Apache Software License 2.0',
classifiers=[
'Framework :: IPython',
'Framework :: Jupyter',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Code Generators',
'Topic :: Text Processing :: Markup :: LaTeX',
],
keywords='equation latex math mathematics',
packages=['latexify'],
install_requires=[
'dill>=0.3.2',
],
python_requires='>=3.6, <3.9',
)
main()
```
#### File: latexify_py/tests/node_visitor_base_test.py
```python
import pytest
from latexify import node_visitor_base
class MockVisitor(node_visitor_base.NodeVisitorBase):
"""Mock visitor class."""
def __init__(self):
# Dummy member to fail visitor invocation.
self.visit_Baz = None # pylint: disable=invalid-name
def generic_visit(self, node, action):
return 'generic_visit: {}, {}'.format(node.__class__.__name__, action)
def visit_Foo(self, node, action): # pylint: disable=invalid-name
del node
return 'visit_Foo: {}'.format(action)
def visit_Foo_abc(self, node): # pylint: disable=invalid-name
del node
return 'visit_Foo_abc'
def visit_Foo_xyz(self, node): # pylint: disable=invalid-name
del node
return 'visit_Foo_xyz'
class Foo:
pass
class Bar:
pass
class Baz:
pass
def test_generic_visit():
visitor = MockVisitor()
assert visitor.visit(Bar()) == 'generic_visit: Bar, None'
assert visitor.visit(Bar(), 'unknown') == 'generic_visit: Bar, unknown'
assert visitor.visit(Bar(), '123') == 'generic_visit: Bar, 123'
def test_visit_node():
visitor = MockVisitor()
assert visitor.visit(Foo()) == 'visit_Foo: None'
assert visitor.visit(Foo(), 'unknown') == 'visit_Foo: unknown'
assert visitor.visit(Foo(), '123') == 'visit_Foo: 123'
def test_visit_node_action():
visitor = MockVisitor()
assert visitor.visit(Foo(), 'abc') == 'visit_Foo_abc'
assert visitor.visit(Foo(), 'xyz') == 'visit_Foo_xyz'
def test_invalid_visit():
visitor = MockVisitor()
with pytest.raises(AttributeError, match='visit_Baz is not callable'):
visitor.visit(Baz())
``` |
{
"source": "1Mmanoncouronne/appnexus-client",
"score": 3
} |
#### File: appnexus-client/appnexus/utils.py
```python
from thingy import names_regex
class classproperty(property):
def __get__(self, cls, owner):
return self.fget(owner)
def normalize_service_name(service_name, delimiter='-'):
words = [word.lower() for word in names_regex.findall(service_name)]
normalized_name = delimiter.join(words)
return normalized_name
__all__ = ["classproperty", "normalize_service_name"]
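# Hedged example (added for illustration; the exact splitting behaviour depends on
# thingy's names_regex, which is assumed here to split camel-case words):
if __name__ == "__main__":
    print(normalize_service_name("ProfileSummary"))       # expected: "profile-summary"
    print(normalize_service_name("ProfileSummary", "_"))  # expected: "profile_summary"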
``` |
{
"source": "1MochaChan1/pafy",
"score": 3
} |
#### File: 1MochaChan1/pafy/yt_mod_1.py
```python
import requests
import json
import re
from bs4 import BeautifulSoup
def search_content(query):
search = "https://www.youtube.com/results?search_query="
text = query
text = list(text.split(" "))
search_query = f"{search}{'+'.join(str(x) for x in text)}"
source = requests.get(search_query).text
soup = BeautifulSoup(source, 'lxml')
script = soup.find_all('script')[32]
json_text = re.search('var ytInitialData = (.+)[,;]{1}', str(script)).group(1)
data = json.loads(json_text) #converts json string to dictionary
meta_data = (data['contents']['twoColumnSearchResultsRenderer']
['primaryContents']['sectionListRenderer']
['contents'][0]['itemSectionRenderer']
['contents'])
videos = {}
res = []
    # initialise the fields that the nested loops and conditionals below fill in
    link, duration, thumbs, title, num = '', '', '', '', ''
if type(meta_data) is list:
for data in meta_data:
if type(data) is dict:
for key, value in data.items():
if type(value) is dict:
for k,v in value.items():
#if (k == 'videoId') or (k == 'thumbnail') or (k == 'title' and 'runs' in v) or (k=='lengthText'): #Checks if the key we need are present
if k=='videoId' and type(v) is not dict and len(v) == 11:
#count += 1
link = "https://www.youtube.com/watch?v="+v
if type(v) is dict:
if 'thumbnails' in v:
if not v['thumbnails'][0]['url'].startswith('//'): #This code is written to avoid getting a channel's thumbnail
thumbs = v['thumbnails'][0]['url']
if k=='title' and 'runs' in v :
title = v['runs'][0]['text']
if k=='lengthText' and 'simpleText' in v:
duration=v['simpleText']
num = [int(x) for x in duration.split(":")]
                                if len(num) == 2:
                                    duration = num[0]*60 + num[1]                 # mm:ss -> total seconds
                                elif len(num) > 2:
                                    duration = num[0]*3600 + num[1]*60 + num[2]   # hh:mm:ss -> total seconds, e.g. '1:02:03' -> 3723
res.append((title, thumbs, link, duration))
for x in range(len(res)): #Storing the video resources in a dictionary
videos[x+1] = res[x]
return videos
"""vid = search_content("dsad")
for k,v in vid.items():
print(f"{k}:{v}")"""
``` |
{
"source": "1MochaChan1/WorkoutPlanner",
"score": 2
} |
#### File: 1MochaChan1/WorkoutPlanner/workout_planner.py
```python
from kivymd.app import MDApp
from kivymd.uix.screen import Screen
from kivymd.uix.button import*
from kivymd.uix.textfield import MDTextField
from kivymd.uix.dialog import MDDialog
from kivymd.uix.menu import MDDropdownMenu
from kivymd.uix.chip import MDChip, MDChooseChip
from kivymd.uix.boxlayout import BoxLayout
from kivymd.uix.datatables import MDDataTable
from kivy.metrics import dp
import numpy as np
from kivy.lang import Builder
import sqlite3 as sql
#===================== Global Variables =======================#
#Builder.load_file("KV.kv")
chip_color = [0.12941176470588237, 0.5882352941176471, 0.9529411764705882, 0.7]
selected_color = [0.12941176470588237, 0.5882352941176471, 0.9529411764705882, 1.0]
#Exercise Databse each sublist has various difficulty, 0 : easy, 1 : medium, 2 : hard
push = ( ["Pushup", "Close Grip", "Wide Grip", "Inclined Pushup"],
["Diamond Pushup", "Archer Pushup","Knuckle Pushup", "Declined Pushups"],
["Decline Diamond", "Pseudo Planche Pushup", "Explosives"] )
pull = (["Chin-ups", "Negatives", "Hanging Hold"],
["Leg Raises", "Slow Pullups", "Dead Hang"],
["Perfect Arched Pullups", "90 degree -Top Hold", "Dead Hang (1 Arm)"])
leg = (["Squats", "Bunny Hops", "Lunges", "Calf Raises"],
["Jumping Squats", "Jumping Lunges", "Bulgarian Split Squats",],
["Pistol Squats", "180 Squats", "Close-Wide Squats", "1 Legged Calf Raises"])
#List of selected exercises.
push_list = []
pull_list = []
leg_list = []
#Database Creation.
con = sql.connect("Workout.db")
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS Plans(Plan varchar(50) UNIQUE, Push TEXT, Pull TEXT, Leg TEXT);")
class BuildApp(Screen): #This class handles everything on screen
def __init__(self, **kwargs):
super().__init__(**kwargs)
diffs = [{'text' : 'Easy'}, {'text' : 'Medium'}, {'text' : 'Hard'}]
self.drop = MDDropdownMenu(
caller = self.ids.pushup_diff,
items = diffs,
width_mult = 4,
)
self.drop.bind(on_release=self.diff_sel)
self.drop1 = MDDropdownMenu(
caller = self.ids.pullup_diff,
items = diffs,
width_mult = 4,
)
self.drop1.bind(on_release=self.diff_sel)
self.drop2 = MDDropdownMenu(
caller = self.ids.legs_diff,
items = diffs,
width_mult = 4,
)
self.drop2.bind(on_release=self.diff_sel)
self.table = MDDataTable(
check = True,
column_data = [
("Plan", dp(30)),
("Push", dp(50)),
("Pull", dp(50)),
("Legs", dp(50)),
],
row_data =[
("Plan Name", "Push Exercises", "Pull Exercises", "Leg Exercises"),
("","","","")
],
pos_hint = {"center_x":0.5, "center_y":0.35},
size_hint = (0.75,0.25),
)
self.add_widget(self.table)
self.table.bind(on_check_press = self.selection)
#Adding the data to the data table
def add_data(self, *args):
cur.execute("SELECT *FROM Plans;")
rows = cur.fetchall()
for row in rows:
self.table.row_data = [
(row),
("","","","")
]
def selection(self, table, table_item):
self.sel_plan = table_item[0]
#Searching a Record
def searching(self, *args):
plan_name = str(self.ids.plan_name.text.strip())
cur.execute("SELECT *FROM Plans WHERE Plan = ?", (plan_name,))
rows = cur.fetchall()
for row in rows:
self.table.row_data = [
(row),
("","","","")
]
print(plan_name)
#Updating A Record
def updation(self, *args):
#Wasn't able to do it using self.push_str and stuff
push_str = ", ".join([str(x) for x in push_list])
pull_str = ", ".join([str(x) for x in pull_list])
leg_str = ", ".join([str(x) for x in leg_list])
record = [push_str, pull_str, leg_str, self.sel_plan]
update = """UPDATE Plans SET Push = ?, Pull = ?, Leg = ? WHERE Plan = ?"""
cur.execute(update, (record))
con.commit()
self.table.row_data=[
(self.sel_plan, push_str, pull_str, leg_str),
("","","","")
]
#Deleting A Record
def deletion(self, *args):
cur.execute("DELETE FROM Plans WHERE Plan = ?", (self.sel_plan,))
con.commit()
self.table.row_data = [
]
#Inserting(Creating) A Record.
def insertion(self):
global push_list, pull_list, leg_list
plan_name = self.ids.plan_name.text.strip()
con = sql.connect("Workout.db")
cur = con.cursor()
push_str = ", ".join([str(x) for x in push_list])
pull_str = ", ".join([str(x) for x in pull_list])
leg_str = ", ".join([str(x) for x in leg_list])
#record = [plan_name, self.push_str, self.pull_str, self.leg_str]
record = [plan_name, push_str, pull_str, leg_str]
cur.execute("INSERT INTO Plans(Plan, Push, Pull, Leg) VALUES(?,?,?,?)", (record))
cur.execute("SELECT *FROM Plans WHERE Plan = ?", (plan_name,))
rows = cur.fetchall()
con.commit()
con.close()
self.add_data()
print(rows)
print("[RECORD INSERTED IN DATABASE SUCCESSFULLY]")
#Fetch the Difficulty and Exercise Selection
def diff_sel(self, menu, menu_item):
global push, pull, leg
print(menu.caller.text) #menu.caller.text gives the text of the helper
print(menu_item.text+"\n")
if menu.caller.text == "Push":
if menu_item.text == "Easy":
#Clearing previous widgets
for child in self.children[:]:
if isinstance(child, MDChip):
if child.text in push[0] + push[1] + push[2]:
self.remove_widget(child)
push_e = {}
for ex,y in zip(push[0], np.arange(0.73,-1,-0.05)):
push_e[round(y,2)] = ex
for pos,ex in push_e.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.2, 'center_y':float(pos)},
on_release = self.selected_push,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
elif menu_item.text == "Medium":
#Clearing previous widgets.
for child in self.children[:]:
if isinstance(child, MDChip):
if child.text in push[0] + push[1] + push[2]:
self.remove_widget(child)
push_m = {}
for ex,y in zip(push[1], np.arange(0.73,-1,-0.05)):
push_m[round(y,2)] = ex
for pos,ex in push_m.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.2, 'center_y':float(pos)},
on_release = self.selected_push,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
elif menu_item.text == "Hard":
#Clearing previous widgets.
for child in self.children[:]:
if isinstance(child, MDChip):
if child.text in push[0] + push[1] + push[2]:
self.remove_widget(child)
push_h = {}
for ex,y in zip(push[2], np.arange(0.73,-1,-0.05)):
push_h[round(y,2)] = ex
for pos,ex in push_h.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.2, 'center_y':float(pos)},
on_release = self.selected_push,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
if menu.caller.text == "Pull":
if menu_item.text == "Easy":
#Clearing previous widgets.
for child in self.children[:]:
if isinstance(child, MDChip):
if child.text in pull[0] + pull[1] + pull[2]:
self.remove_widget(child)
pull_e = {}
for ex,y in zip(pull[0], np.arange(0.73,-1,-0.05)):
pull_e[round(y,2)] = ex
for pos,ex in pull_e.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.505, 'center_y':float(pos)},
on_release = self.selected_pull,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
elif menu_item.text == "Medium":
#Clearing previous widgets.
for child in self.children[:]:
if isinstance(child, MDChip):
if child.text in pull[0] + pull[1] + pull[2]:
self.remove_widget(child)
pull_m = {}
for ex,y in zip(pull[1], np.arange(0.73,-1,-0.05)):
pull_m[round(y,2)] = ex
for pos,ex in pull_m.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.505, 'center_y':float(pos)},
on_release = self.selected_pull,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
elif menu_item.text == "Hard":
#Clearing previous widgets.
for child in self.children[:]:
if isinstance(child, MDChip):
if child.text in pull[0] + pull[1] + pull[2]:
self.remove_widget(child)
pull_h = {}
for ex,y in zip(pull[2], np.arange(0.73,-1,-0.05)):
pull_h[round(y,2)] = ex
for pos,ex in pull_h.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.505, 'center_y':float(pos)},
on_release = self.selected_pull,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
if menu.caller.text == "Legs":
if menu_item.text == "Easy":
#Clearing previous widgets.
for child in self.children[:]:
                    if isinstance(child, MDChip):  # type check: only touch chip widgets
if child.text in leg[0] + leg[1] + leg[2]:
self.remove_widget(child)
leg_e = {}
for ex,y in zip(leg[0], np.arange(0.73,-1,-0.05)):
leg_e[round(y,2)] = ex
for pos,ex in leg_e.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.8, 'center_y':float(pos)},
on_release = self.selected_legs,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
elif menu_item.text == "Medium":
#Clearing previous widgets.
for child in self.children[:]:
if isinstance(child, MDChip):
if child.text in leg[0] + leg[1] + leg[2]:
self.remove_widget(child)
leg_m = {}
for ex,y in zip(leg[1], np.arange(0.73,-1,-0.05)):
leg_m[round(y,2)] = ex
for pos,ex in leg_m.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.8, 'center_y':float(pos)},
on_release = self.selected_legs,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
elif menu_item.text == "Hard":
#Clearing previous widgets.
for child in self.children[:]:
if isinstance(child, MDChip):
if child.text in leg[0] + leg[1] + leg[2]:
self.remove_widget(child)
leg_h = {}
for ex,y in zip(leg[2], np.arange(0.73,-1,-0.05)):
leg_h[round(y,2)] = ex
for pos,ex in leg_h.items():
chip = MDChip(
text= ex,
pos_hint = {'center_x':0.8, 'center_y':float(pos)},
on_release = self.selected_legs,
check = True
)
chip.color = chip_color
chip.icon = "coffee"
self.add_widget(chip)
#Creating a list of selected exercises.
def selected_push(self, chip_widget):
if chip_widget.color == chip_color:
#self.root.remove_widget(chip_widget) #Root refers to the parent they are inside (Screen)
chip_widget.color = selected_color
if chip_widget.text not in push_list:
push_list.append(chip_widget.text)
else:
chip_widget.color = chip_color
push_list.remove(chip_widget.text)
print(f"Push : {push_list}", chip_widget.pos_hint)
def selected_pull(self, chip_widget):
if chip_widget.color == chip_color:
#self.root.remove_widget(chip_widget) #Root refers to the parent they are inside (Screen)
chip_widget.color = selected_color
if chip_widget.text not in pull_list:
pull_list.append(chip_widget.text)
else:
chip_widget.color = chip_color
pull_list.remove(chip_widget.text)
print(f"Pull : {pull_list}")
def selected_legs(self, chip_widget):
if chip_widget.color == chip_color:
#self.root.remove_widget(chip_widget) #Root refers to the parent they are inside (Screen)
chip_widget.color = selected_color
if chip_widget.text not in leg_list:
leg_list.append(chip_widget.text)
else:
chip_widget.color = chip_color
leg_list.remove(chip_widget.text)
print(f"Legs : {leg_list}")
def submit(self, obj):
        if self.ids.plan_name.text == "":
dialog = "Please Enter a Plan Name"
plan_name = "Error"
else:
dialog = f"You successfully created a plan named : {self.ids.plan_name.text}"
plan_name = "Success"
for child in self.children[:]:
if isinstance(child, MDChip):
self.remove_widget(child)
push_list.clear()
pull_list.clear()
leg_list.clear()
close_btn = MDRectangleFlatButton(text = "Close", on_release = self.close_dial)
self.dial = MDDialog(
title = plan_name,
text = dialog,
buttons= [close_btn],
)
self.dial.open()
def close_dial(self, obj):
self.dial.dismiss()
class DemoApp(MDApp):
def build(self): #Screen() is present in this function, widget positioning is done here
self.theme_cls.primary_palette = "LightBlue"
self.theme_cls.theme_hue = "500"
self.theme_cls.theme_style = "Dark"
return BuildApp()
DemoApp().run()
``` |
{
"source": "1moere1/molecule-vmware",
"score": 2
} |
#### File: molecule-vmware/molecule_vmware/driver.py
```python
import os
from molecule.api import Driver
from molecule import logger, util
LOG = logger.get_logger(__name__)
class VMware(Driver):
def __init__(self, config=None):
super(VMware, self).__init__(config)
self._name = "vmware"
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def default_safe_files(self):
return [self.instance_config]
@property
def login_cmd_template(self):
return (
"ssh {address} -l {user} -p {port} -i {identity_file}"
)
@property
def default_ssh_connection_options(self):
return self._get_ssh_connection_options()
def login_options(self, instance_name):
d = {"instance": instance_name}
return util.merge_dicts(d, self._get_instance_config(instance_name))
def ansible_connection_options(self, instance_name):
try:
d = self._get_instance_config(instance_name)
if "instance_os_type" in d:
if d['instance_os_type'] == "linux":
return {
"ansible_user": d["user"],
"ansible_host": d["address"],
"ansible_port": d["port"],
"ansible_private_key_file": d["identity_file"],
"connection": "ssh",
"ansible_ssh_common_args": " ".join(self.ssh_connection_options),
}
if d['instance_os_type'] == "windows":
return {
"ansible_user": d["user"],
"ansible_host": d["address"],
"ansible_password": d["password"],
"ansible_port": d["port"],
"ansible_connection": d["connection"],
"ansible_winrm_transport": d["winrm_transport"],
"ansible_winrm_server_cert_validation": d["winrm_server_cert_validation"]
}
except StopIteration:
return {}
except IOError:
            # Instance has yet to be provisioned, therefore the
# instance_config is not on disk.
return {}
def _get_instance_config(self, instance_name):
instance_config_dict = util.safe_load_file(self._config.driver.instance_config)
return next(
item for item in instance_config_dict if item["instance"] == instance_name
)
def template_dir(self):
"""Return path to its own cookiecutterm templates. It is used by init
command in order to figure out where to load the templates from.
"""
return os.path.join(os.path.dirname(__file__), "cookiecutter")
``` |
{
"source": "1mplex/few-shot-object-detection",
"score": 2
} |
#### File: fsdet/data/builtin.py
```python
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.lvis import (
get_lvis_instances_meta,
register_lvis_instances,
)
from detectron2.data.datasets.pascal_voc import register_pascal_voc
from detectron2.data.datasets.register_coco import register_coco_instances
from .builtin_meta import _get_builtin_metadata
from .meta_coco import register_meta_coco
from .meta_lvis import register_meta_lvis
from .meta_pascal_voc import register_meta_pascal_voc
# ==== Predefined datasets and splits for COCO ==========
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
"coco_2014_train": (
"coco/train2014",
"coco/annotations/instances_train2014.json",
),
"coco_2014_val": (
"coco/val2014",
"coco/annotations/instances_val2014.json",
),
"coco_2014_minival": (
"coco/val2014",
"coco/annotations/instances_minival2014.json",
),
"coco_2014_minival_100": (
"coco/val2014",
"coco/annotations/instances_minival2014_100.json",
),
"coco_2014_valminusminival": (
"coco/val2014",
"coco/annotations/instances_valminusminival2014.json",
),
"coco_2017_train": (
"coco/train2017",
"coco/annotations/instances_train2017.json",
),
"coco_2017_val": (
"coco/val2017",
"coco/annotations/instances_val2017.json",
),
"coco_2017_test": (
"coco/test2017",
"coco/annotations/image_info_test2017.json",
),
"coco_2017_test-dev": (
"coco/test2017",
"coco/annotations/image_info_test-dev2017.json",
),
"coco_2017_val_100": (
"coco/val2017",
"coco/annotations/instances_val2017_100.json",
),
}
def register_all_coco(root="datasets"):
# for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
# for key, (image_root, json_file) in splits_per_dataset.items():
# # Assume pre-defined datasets live in `./datasets`.
# register_coco_instances(
# key,
# _get_builtin_metadata(dataset_name),
# os.path.join(root, json_file)
# if "://" not in json_file
# else json_file,
# os.path.join(root, image_root),
# )
# register meta datasets
METASPLITS = [
(
"coco_trainval_all",
"coco/trainval2014",
"cocosplit/datasplit/trainvalno5k.json",
),
(
"coco_trainval_base",
"coco/trainval2014",
"cocosplit/datasplit/trainvalno5k.json",
),
("coco_test_all", "coco/val2014", "cocosplit/datasplit/5k.json"),
("coco_test_base", "coco/val2014", "cocosplit/datasplit/5k.json"),
("coco_test_novel", "coco/val2014", "cocosplit/datasplit/5k.json"),
]
# register small meta datasets for fine-tuning stage
for prefix in ["all", "novel"]:
for shot in [1, 2, 3, 5, 10, 30]:
for seed in range(10):
seed = "" if seed == 0 else "_seed{}".format(seed)
name = "coco_trainval_{}_{}shot{}".format(prefix, shot, seed)
METASPLITS.append((name, "coco/trainval2014", ""))
for name, imgdir, annofile in METASPLITS:
register_meta_coco(
name,
_get_builtin_metadata("coco_fewshot"),
os.path.join(root, imgdir),
os.path.join(root, annofile),
)
# ==== Predefined datasets and splits for LVIS ==========
_PREDEFINED_SPLITS_LVIS = {
"lvis_v0.5": {
# "lvis_v0.5_train": ("coco/train2017", "lvis/lvis_v0.5_train.json"),
"lvis_v0.5_train_freq": (
"coco/train2017",
"lvis/lvis_v0.5_train_freq.json",
),
"lvis_v0.5_train_common": (
"coco/train2017",
"lvis/lvis_v0.5_train_common.json",
),
"lvis_v0.5_train_rare": (
"coco/train2017",
"lvis/lvis_v0.5_train_rare.json",
),
# "lvis_v0.5_val": ("coco/val2017", "lvis/lvis_v0.5_val.json"),
# "lvis_v0.5_val_rand_100": (
# "coco/val2017",
# "lvis/lvis_v0.5_val_rand_100.json",
# ),
# "lvis_v0.5_test": (
# "coco/test2017",
# "lvis/lvis_v0.5_image_info_test.json",
# ),
},
}
def register_all_lvis(root="datasets"):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_lvis_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
# register meta datasets
METASPLITS = [
(
"lvis_v0.5_train_shots",
"coco/train2017",
"lvissplit/lvis_shots.json",
),
(
"lvis_v0.5_train_rare_novel",
"coco/train2017",
"lvis/lvis_v0.5_train_rare.json",
),
("lvis_v0.5_val_novel", "coco/val2017", "lvis/lvis_v0.5_val.json"),
]
for name, image_root, json_file in METASPLITS:
dataset_name = "lvis_v0.5_fewshot" if "novel" in name else "lvis_v0.5"
register_meta_lvis(
name,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
# ==== Predefined splits for PASCAL VOC ===========
def register_all_pascal_voc(root="datasets"):
# SPLITS = [
# ("voc_2007_trainval", "VOC2007", "trainval"),
# ("voc_2007_train", "VOC2007", "train"),
# ("voc_2007_val", "VOC2007", "val"),
# ("voc_2007_test", "VOC2007", "test"),
# ("voc_2012_trainval", "VOC2012", "trainval"),
# ("voc_2012_train", "VOC2012", "train"),
# ("voc_2012_val", "VOC2012", "val"),
# ]
# for name, dirname, split in SPLITS:
# year = 2007 if "2007" in name else 2012
# register_pascal_voc(name, os.path.join(root, dirname), split, year)
# MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# register meta datasets
METASPLITS = [
("voc_2007_trainval_base1", "VOC2007", "trainval", "base1", 1),
("voc_2007_trainval_base2", "VOC2007", "trainval", "base2", 2),
("voc_2007_trainval_base3", "VOC2007", "trainval", "base3", 3),
("voc_2012_trainval_base1", "VOC2012", "trainval", "base1", 1),
("voc_2012_trainval_base2", "VOC2012", "trainval", "base2", 2),
("voc_2012_trainval_base3", "VOC2012", "trainval", "base3", 3),
("voc_2007_trainval_all1", "VOC2007", "trainval", "base_novel_1", 1),
("voc_2007_trainval_all2", "VOC2007", "trainval", "base_novel_2", 2),
("voc_2007_trainval_all3", "VOC2007", "trainval", "base_novel_3", 3),
("voc_2012_trainval_all1", "VOC2012", "trainval", "base_novel_1", 1),
("voc_2012_trainval_all2", "VOC2012", "trainval", "base_novel_2", 2),
("voc_2012_trainval_all3", "VOC2012", "trainval", "base_novel_3", 3),
("voc_2007_test_base1", "VOC2007", "test", "base1", 1),
("voc_2007_test_base2", "VOC2007", "test", "base2", 2),
("voc_2007_test_base3", "VOC2007", "test", "base3", 3),
("voc_2007_test_novel1", "VOC2007", "test", "novel1", 1),
("voc_2007_test_novel2", "VOC2007", "test", "novel2", 2),
("voc_2007_test_novel3", "VOC2007", "test", "novel3", 3),
("voc_2007_test_all1", "VOC2007", "test", "base_novel_1", 1),
("voc_2007_test_all2", "VOC2007", "test", "base_novel_2", 2),
("voc_2007_test_all3", "VOC2007", "test", "base_novel_3", 3),
]
# register small meta datasets for fine-tuning stage
for prefix in ["all", "novel"]:
for sid in range(1, 4):
for shot in [1, 2, 3, 5, 10]:
for year in [2007, 2012]:
for seed in range(100):
seed = "" if seed == 0 else "_seed{}".format(seed)
name = "voc_{}_trainval_{}{}_{}shot{}".format(
year, prefix, sid, shot, seed
)
dirname = "VOC{}".format(year)
img_file = "{}_{}shot_split_{}_trainval".format(
prefix, shot, sid
)
keepclasses = (
"base_novel_{}".format(sid)
if prefix == "all"
else "novel{}".format(sid)
)
METASPLITS.append(
(name, dirname, img_file, keepclasses, sid)
)
for name, dirname, split, keepclasses, sid in METASPLITS:
year = 2007 if "2007" in name else 2012
register_meta_pascal_voc(
name,
_get_builtin_metadata("pascal_voc_fewshot"),
os.path.join(root, dirname),
split,
year,
keepclasses,
sid,
)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# Register them all under "./datasets"
register_all_coco()
register_all_lvis()
register_all_pascal_voc()
```
#### File: fsdet/evaluation/coco_evaluation.py
```python
import contextlib
import copy
import io
import itertools
import json
import logging
import numpy as np
import os
import torch
from collections import OrderedDict
from fvcore.common.file_io import PathManager
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tabulate import tabulate
import detectron2.utils.comm as comm
from detectron2.data import MetadataCatalog
from detectron2.data.datasets.coco import convert_to_coco_json
from detectron2.structures import BoxMode
from detectron2.utils.logger import create_small_table
from fsdet.evaluation.evaluator import DatasetEvaluator
class COCOEvaluator(DatasetEvaluator):
"""
Evaluate instance detection outputs using COCO's metrics and APIs.
"""
def __init__(self, dataset_name, cfg, distributed, output_dir=None):
"""
Args:
dataset_name (str): name of the dataset to be evaluated.
It must have either the following corresponding metadata:
"json_file": the path to the COCO format annotation
Or it must be in detectron2's standard dataset format
so it can be converted to COCO format automatically.
cfg (CfgNode): config instance
distributed (True):
if True, will collect results from all ranks for evaluation.
Otherwise, will evaluate the results in the current process.
output_dir (str): optional, an output directory to dump results.
"""
self._distributed = distributed
self._output_dir = output_dir
self._dataset_name = dataset_name
self._cpu_device = torch.device("cpu")
self._logger = logging.getLogger(__name__)
self._metadata = MetadataCatalog.get(dataset_name)
if not hasattr(self._metadata, "json_file"):
self._logger.warning(
f"json_file was not found in MetaDataCatalog for '{dataset_name}'")
cache_path = convert_to_coco_json(dataset_name, output_dir)
self._metadata.json_file = cache_path
self._is_splits = "all" in dataset_name or "base" in dataset_name \
or "novel" in dataset_name
self._base_classes = [
8, 10, 11, 13, 14, 15, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35,
36, 37, 38, 39, 40, 41, 42, 43, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 65, 70, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 84, 85, 86, 87, 88, 89, 90,
]
self._novel_classes = [1, 2, 3, 4, 5, 6, 7, 9, 16, 17, 18, 19, 20, 21,
44, 62, 63, 64, 67, 72]
json_file = PathManager.get_local_path(self._metadata.json_file)
with contextlib.redirect_stdout(io.StringIO()):
self._coco_api = COCO(json_file)
# Test set json files do not contain annotations (evaluation must be
# performed using the COCO evaluation server).
self._do_evaluation = "annotations" in self._coco_api.dataset
def reset(self):
self._predictions = []
self._coco_results = []
def process(self, inputs, outputs):
"""
Args:
inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
It is a list of dict. Each dict corresponds to an image and
contains keys like "height", "width", "file_name", "image_id".
outputs: the outputs of a COCO model. It is a list of dicts with key
"instances" that contains :class:`Instances`.
"""
for input, output in zip(inputs, outputs):
prediction = {"image_id": input["image_id"]}
# TODO this is ugly
if "instances" in output:
instances = output["instances"].to(self._cpu_device)
prediction["instances"] = instances_to_coco_json(
instances, input["image_id"])
self._predictions.append(prediction)
def evaluate(self):
if self._distributed:
comm.synchronize()
self._predictions = comm.gather(self._predictions, dst=0)
self._predictions = list(itertools.chain(*self._predictions))
if not comm.is_main_process():
return {}
if len(self._predictions) == 0:
self._logger.warning(
"[COCOEvaluator] Did not receive valid predictions.")
return {}
if self._output_dir:
PathManager.mkdirs(self._output_dir)
file_path = os.path.join(
self._output_dir, "instances_predictions.pth")
with PathManager.open(file_path, "wb") as f:
torch.save(self._predictions, f)
self._results = OrderedDict()
if "instances" in self._predictions[0]:
self._eval_predictions()
# Copy so the caller can do whatever with results
return copy.deepcopy(self._results)
def _eval_predictions(self):
"""
Evaluate self._predictions on the instance detection task.
Fill self._results with the metrics of the instance detection task.
"""
self._logger.info("Preparing results for COCO format ...")
self._coco_results = list(
itertools.chain(*[x["instances"] for x in self._predictions]))
# unmap the category ids for COCO
if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
reverse_id_mapping = {
v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
}
for result in self._coco_results:
result["category_id"] = reverse_id_mapping[result["category_id"]]
if self._output_dir:
file_path = os.path.join(self._output_dir, "coco_instances_results.json")
self._logger.info("Saving results to {}".format(file_path))
with PathManager.open(file_path, "w") as f:
f.write(json.dumps(self._coco_results))
f.flush()
if not self._do_evaluation:
self._logger.info("Annotations are not available for evaluation.")
return
self._logger.info("Evaluating predictions ...")
if self._is_splits:
self._results["bbox"] = {}
for split, classes, names in [
("all", None, self._metadata.get("thing_classes")),
("base", self._base_classes, self._metadata.get("base_classes")),
("novel", self._novel_classes, self._metadata.get("novel_classes"))]:
if "all" not in self._dataset_name and \
split not in self._dataset_name:
continue
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api, self._coco_results, "bbox", classes,
)
if len(self._coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res_ = self._derive_coco_results(
coco_eval, "bbox", class_names=names,
)
res = {}
for metric in res_.keys():
if len(metric) <= 4:
if split == "all":
res[metric] = res_[metric]
elif split == "base":
res["b"+metric] = res_[metric]
elif split == "novel":
res["n"+metric] = res_[metric]
self._results["bbox"].update(res)
# add "AP" if not already in
if "AP" not in self._results["bbox"]:
if "nAP" in self._results["bbox"]:
self._results["bbox"]["AP"] = self._results["bbox"]["nAP"]
else:
self._results["bbox"]["AP"] = self._results["bbox"]["bAP"]
else:
coco_eval = (
_evaluate_predictions_on_coco(
self._coco_api, self._coco_results, "bbox",
)
if len(self._coco_results) > 0
else None # cocoapi does not handle empty results very well
)
res = self._derive_coco_results(
coco_eval, "bbox",
class_names=self._metadata.get("thing_classes")
)
self._results["bbox"] = res
def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
"""
Derive the desired score numbers from summarized COCOeval.
Args:
coco_eval (None or COCOEval): None represents no predictions from model.
iou_type (str):
class_names (None or list[str]): if provided, will use it to predict
per-category AP.
Returns:
a dict of {metric name: score}
"""
metrics = ["AP", "AP50", "AP75", "APs", "APm", "APl"]
if coco_eval is None:
self._logger.warn("No predictions from the model! Set scores to -1")
return {metric: -1 for metric in metrics}
# the standard metrics
results = {
metric: float(coco_eval.stats[idx] * 100) \
for idx, metric in enumerate(metrics)
}
self._logger.info(
"Evaluation results for {}: \n".format(iou_type) + \
create_small_table(results)
)
if class_names is None or len(class_names) <= 1:
return results
# Compute per-category AP
# from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
precisions = coco_eval.eval["precision"]
# precision has dims (iou, recall, cls, area range, max dets)
assert len(class_names) == precisions.shape[2]
results_per_category = []
for idx, name in enumerate(class_names):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
ap = np.mean(precision) if precision.size else float("nan")
results_per_category.append(("{}".format(name), float(ap * 100)))
# tabulate it
N_COLS = min(6, len(results_per_category) * 2)
results_flatten = list(itertools.chain(*results_per_category))
results_2d = itertools.zip_longest(
*[results_flatten[i::N_COLS] for i in range(N_COLS)])
table = tabulate(
results_2d,
tablefmt="pipe",
floatfmt=".3f",
headers=["category", "AP"] * (N_COLS // 2),
numalign="left",
)
self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
results.update({"AP-" + name: ap for name, ap in results_per_category})
return results
def instances_to_coco_json(instances, img_id):
"""
Dump an "Instances" object to a COCO-format json that's used for evaluation.
Args:
instances (Instances):
img_id (int): the image id
Returns:
list[dict]: list of json annotations in COCO format.
"""
num_instance = len(instances)
if num_instance == 0:
return []
boxes = instances.pred_boxes.tensor.numpy()
boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
boxes = boxes.tolist()
scores = instances.scores.tolist()
classes = instances.pred_classes.tolist()
results = []
for k in range(num_instance):
result = {
"image_id": img_id,
"category_id": classes[k],
"bbox": boxes[k],
"score": scores[k],
}
results.append(result)
return results
def _evaluate_predictions_on_coco(coco_gt, coco_results, iou_type, catIds=None):
"""
Evaluate the coco results using COCOEval API.
"""
assert len(coco_results) > 0
coco_dt = coco_gt.loadRes(coco_results)
coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
if catIds is not None:
coco_eval.params.catIds = catIds
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
return coco_eval
``` |
{
"source": "1mplex/segmentation_image_augmentation",
"score": 2
} |
#### File: segmentation_image_augmentation/augment/MultiPartAugmentor.py
```python
from augment.Augmentor import Augmentor
from utils import *
class MultiPartAugmentor(Augmentor):
def __init__(self, params):
super().__init__(params)
def get_input_type(self):
return 'multi-part'
def _transform_masks(self):
self._call_buffer['small_masks'] = []
object_colors = generate_colors(len(self._call_buffer['mask_list']))
if 'class' in self.output_type_list:
class_colors = generate_colors(self.num_classes + 1)
if 'multi-part' in self.output_type_list:
objects = []
total_parts = 0
for i, mask in enumerate(self._call_buffer['mask_list']):
obj = semantic2binary_list(mask)
total_parts += len(obj)
objects.append(obj)
parts_colors = generate_colors(total_parts)
for i, mask in enumerate(self._call_buffer['mask_list']):
m = {}
if 'single' in self.output_type_list:
m['single'] = single2multi(semantic2binary(mask))
if 'multi-object' in self.output_type_list:
m['multi-object'] = color_mask(mask, object_colors[i])
if 'multi-part' in self.output_type_list:
                obj_colors = [parts_colors.pop(0) for _ in range(len(objects[i]))]
m['multi-part'] = binary_list2semantic(objects[i], obj_colors)
if 'class' in self.output_type_list:
m['class'] = color_mask(mask, class_colors[self._call_buffer['class_list'][i]])
self._call_buffer['small_masks'].append(m)
```
#### File: segmentation_image_augmentation/datagen/DataGen.py
```python
import os
import random
from utils import *
from augment.SingleAugmentor import SingleAugmentor
from augment.MultiPartAugmentor import MultiPartAugmentor
from augment.SemanticAugmentor import SemanticAugmentor
class DataGen:
def __init__(self, input_path, input_type,
img_prefix='rgb', mask_prefix='label',
augmentor_params=None, class_mapping=None,
balance=False, separable_class=False):
self.input_path = input_path
self.input_type = input_type
self.augmentor_params = augmentor_params
self.img_prefix = img_prefix
self.mask_prefix = mask_prefix
self.class_mapping = class_mapping
self.balance = balance
self.separable_class = separable_class
self._get_classes()
self.set_class_mappings()
self._get_pairs_list()
self._get_stats()
self._set_augmentor()
def _set_augmentor(self):
augmentor_types = {
'single': SingleAugmentor,
'multi-part': MultiPartAugmentor,
'semantic': SemanticAugmentor
}
if self.input_type not in list(augmentor_types.keys()):
raise UserWarning('Unrecognized input type: {}'.format(self.input_type))
else:
self.augmentor = augmentor_types[self.input_type](self.augmentor_params)
self.output_type_list = self.augmentor.output_type_list
def _get_classes(self):
self.classes = next(os.walk(self.input_path))[1]
def set_class_mappings(self):
if self.class_mapping is None:
class2num = {}
for i, c in enumerate(self.classes):
class2num[c] = i
self.class2num = class2num
else:
self.class2num = self.class_mapping
num2class = {}
for k, v in self.class2num.items():
num2class[v] = k
self.num2class = num2class
def _get_pairs_list(self):
input_pairs = {}
for class_name in self.classes:
class_dir = os.path.join(self.input_path, class_name)
subdirs = set(next(os.walk(class_dir))[1])
if len(subdirs) == 0:
images, masks = get_img_mask_list(class_dir, mask_path=None,
img_prefix=self.img_prefix, mask_prefix=self.mask_prefix)
elif subdirs == {self.img_prefix, self.mask_prefix}:
img_class_dir = os.path.join(class_dir, self.img_prefix)
mask_class_dir = os.path.join(class_dir, self.mask_prefix)
images, masks = get_img_mask_list(img_class_dir, mask_path=mask_class_dir,
img_prefix=self.img_prefix, mask_prefix=self.mask_prefix)
else:
raise UserWarning('Wrong input files structure')
input_pairs[class_name] = {
'images': images,
'masks': masks
}
self.input_pairs = input_pairs
def _get_stats(self):
self.classes = list(self.input_pairs.keys())
self.num_classes = len(self.classes)
self.sample_per_class = [len(self.input_pairs[c]['images']) for c in self.input_pairs]
self.total_samples = sum(self.sample_per_class)
if self.balance:
self.class_weights = [i / self.total_samples for i in self.sample_per_class]
else:
self.class_weights = [1 / self.num_classes] * self.num_classes
def _choose_class(self):
return random.choices(self.classes, weights=self.class_weights, k=1)[0]
def _get_next_input_pair(self, class_name=None):
if class_name is None:
class_name = self._choose_class()
sample_num = random.randint(0, len(self.input_pairs[class_name]['images']) - 1)
img = self.input_pairs[class_name]['images'][sample_num]
mask = self.input_pairs[class_name]['masks'][sample_num]
return read(img), read(mask), self.class2num[class_name]
def get_scene(self, scene_samples=1):
images = []
masks = []
classes = []
class_name = self._choose_class() if self.separable_class else None
for i in range(scene_samples):
img, msk, cl = self._get_next_input_pair(class_name)
images.append(img)
masks.append(msk)
classes.append(cl)
transformed_scene = self.augmentor.transform(images, masks, classes)
return transformed_scene, classes
```
#### File: segmentation_image_augmentation/datagen/SavingDataGen.py
```python
import os
from pathlib import Path
from collections import Counter
from utils import *
from datagen.DataGen import DataGen
class SavingDataGen(DataGen):
def __init__(self, input_path, input_type, output_path, split_masks=False,
img_prefix='rgb', mask_prefix='label',
augmentor_params=None, class_mapping=None,
balance=True, separable_class=False,
img_format='jpg', mask_format='png'):
super().__init__(input_path, input_type,
img_prefix=img_prefix, mask_prefix=mask_prefix,
augmentor_params=augmentor_params, class_mapping=class_mapping,
balance=balance, separable_class=separable_class)
self.output_path = output_path
self.split_masks = split_masks
self.img_format = img_format
self.mask_format = mask_format
def prepare_folders(self):
Path(self.output_path).mkdir(parents=True, exist_ok=True)
if self.separable_class:
for c in self.classes:
class_dir = os.path.join(self.output_path, c)
Path(class_dir).mkdir(parents=True, exist_ok=True)
if self.split_masks:
for out_type in self.output_type_list:
Path(os.path.join(class_dir, out_type)).mkdir(parents=True, exist_ok=True)
Path(os.path.join(class_dir, 'images')).mkdir(parents=True, exist_ok=True)
else:
if self.split_masks:
for out_type in self.output_type_list:
Path(os.path.join(self.output_path, out_type)).mkdir(parents=True, exist_ok=True)
Path(os.path.join(self.output_path, 'images')).mkdir(parents=True, exist_ok=True)
def create_dataset(self, num_samples=1, scene_samples=1):
self.prepare_folders()
data_file = os.path.join(self.output_path, 'description.csv')
try:
previous_data = read_csv(data_file)
sample_num = previous_data['sample_num'].max() + 1
        except Exception:  # no description.csv yet, start numbering from 1
sample_num = 1
total_time = 0
load_time = 0
transform_time = 0
streaming_time = 0
avg_h = []
avg_w = []
for sample in range(num_samples):
transformed_scene, classes = self.get_scene(scene_samples)
avg_h.append(transformed_scene['scene'].shape[0])
avg_w.append(transformed_scene['scene'].shape[1])
samples_per_class_num = dict(Counter(classes))
for input_class in range(self.num_classes):
if input_class not in samples_per_class_num.keys():
samples_per_class_num[input_class] = 0
sample_data = {
'sample_num': sample_num,
'height': transformed_scene['scene'].shape[0],
'width': transformed_scene['scene'].shape[1]
}
for input_class in range(self.num_classes):
sample_data[str(input_class) + '_class'] = samples_per_class_num[input_class]
c = self.num2class[classes[0]] if self.separable_class else ''
image_path = os.path.join(
self.output_path, c,
'images' if self.split_masks else '',
'generated_{:0>5}_{}.{}'.format(sample_num, self.img_prefix, self.img_format)
)
write(image_path, transformed_scene['scene'])
sample_data['image_path'] = image_path
for mask_type in self.output_type_list:
m = mask_type if self.split_masks else ''
mask_path = os.path.join(
self.output_path, c, m,
'generated_{:0>5}_{}_{}.{}'.format(sample_num, mask_type, self.mask_prefix, self.mask_format)
)
write(mask_path, transformed_scene['masks'][mask_type])
sample_data[mask_type + '_path'] = mask_path
if 'bboxes' in transformed_scene:
subdir = 'bboxes' if self.split_masks else ''
bbox_dir = os.path.join(self.output_path, subdir)
Path(bbox_dir).mkdir(parents=True, exist_ok=True)
bbox_path = os.path.join(bbox_dir, 'generated_{:0>5}_bbox.csv'.format(sample_num))
x_min_list = []
x_max_list = []
y_min_list = []
y_max_list = []
for bbox in transformed_scene['bboxes']['multi-object']:
[(x_min, y_max), (_, y_min), (x_max, _), (_, _), (_, _)] = bbox
x_min_list.append(x_min)
x_max_list.append(x_max)
y_min_list.append(y_min)
y_max_list.append(y_max)
bboxes_df = pd.DataFrame({
'x_min': x_min_list,
'x_max': x_max_list,
'y_min': y_min_list,
'y_max': y_max_list,
'class': classes
})
write_csv(bboxes_df, bbox_path)
sample_data['bbox_path'] = bbox_path
write_csv(pd.DataFrame(sample_data, index=[0]), data_file)
sample_num += 1
return (total_time, load_time, transform_time, streaming_time), (sum(avg_h) / len(avg_h), sum(avg_w) / len(avg_w))
```
#### File: segmentation_image_augmentation/transformations/binarize_mask.py
```python
def binarize_mask(mask):
"""Returns binary 2D mask.
Args:
mask (numpy.array): RGB or grayscale mask.
Returns:
mask (numpy.array): binarized grayscale mask.
"""
if len(list(mask.shape)) == 2:
return mask
else:
return (mask > 0).max(axis=2).astype(int) * 255
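# Hedged illustration (not part of the original module): a 2x2 RGB mask with a
# single non-zero pixel collapses to a single-channel 0/255 mask.
if __name__ == "__main__":
    import numpy as np
    demo = np.zeros((2, 2, 3), dtype=int)
    demo[0, 0] = [10, 0, 0]
    print(binarize_mask(demo))  # [[255   0]
                                #  [  0   0]]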
```
#### File: segmentation_image_augmentation/transformations/random_crop.py
```python
import random
def random_crop(img, size):
if (img.shape[0] < size[0]) or (img.shape[1] < size[1]):
raise UserWarning("Can't crop a big image from a small one")
if (img.shape[0] == size[0]) and (img.shape[1] == size[1]):
return img
start_height = random.randint(0, img.shape[0] - size[0] - 1)
start_width = random.randint(0, img.shape[1] - size[1] - 1)
return img[start_height:start_height + size[0], start_width:start_width + size[1], :]
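# Hedged usage sketch (not in the original file): cropping a 64x64 patch from a
# larger HxWx3 image; the crop origin is drawn uniformly at random.
if __name__ == "__main__":
    import numpy as np
    image = np.zeros((100, 120, 3), dtype=np.uint8)
    patch = random_crop(image, (64, 64))
    print(patch.shape)  # (64, 64, 3)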
```
#### File: segmentation_image_augmentation/transformations/resize.py
```python
import cv2
def resize(img, shape):
return cv2.resize(img, (shape[1], shape[0]))
```
#### File: segmentation_image_augmentation/transformations/rotate.py
```python
import random
from utils.pil import *
def rotate_pair(img, mask, degree):
"""Rotates image and mask for the degree from range [-degree, degree]
Args:
img (numpy.array): RGB or grayscale image.
mask (numpy.array): image mask.
degree (int): maximum rotation degree.
Returns:
img (numpy.array): rotated image.
mask (numpy.array): rotated image mask.
"""
rotation_degree = int(random.random() * degree)
rotation_degree = rotation_degree if random.random() < 0.5 else -rotation_degree
img = rotate(img, rotation_degree)
mask = rotate(mask, rotation_degree)
return img, mask
def rotate(img, degree):
"""Rotates image for the specified degree.
Args:
img (numpy.array): RGB or grayscale image.
degree (int): rotation degree
Returns:
img (numpy.array): rotated image.
"""
pil_img = np2pil(img)
pil_img = pil_img.rotate(degree)
np_img = pil2np(pil_img)
return np_img
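# Hedged usage sketch (not in the original file; assumes utils.pil's np2pil/pil2np
# round-trip uint8 RGB arrays): rotate an image and its mask by the same random
# angle of at most 30 degrees in either direction.
if __name__ == "__main__":
    import numpy as np
    img = np.zeros((50, 50, 3), dtype=np.uint8)
    mask = np.zeros((50, 50, 3), dtype=np.uint8)
    img_r, mask_r = rotate_pair(img, mask, degree=30)
    print(img_r.shape, mask_r.shape)  # PIL's rotate keeps the original size by default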
```
#### File: segmentation_image_augmentation/utils/check_is_image.py
```python
def check_is_image(img):
try:
if not ((img.shape[2] == 3) and (img.shape[0] > 10) and (img.shape[1] > 10)):
raise UserWarning('Wrong image size')
except:
        raise UserWarning('Not a numpy image')
```
#### File: segmentation_image_augmentation/utils/colors.py
```python
import itertools
import math
import numpy as np
def generate_colors(n):
levels_count = math.ceil(math.pow(n + 2, 1 / 3)) # remove black and white
step = 1 / levels_count
levels = [1 - (step * i) for i in range(levels_count)]
colors = []
for p in itertools.product(levels, repeat=3):
colors.append((np.array([*p]) * 255).astype(int))
return colors[1:n + 1]
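# Hedged illustration (added note, not in the original file): the grid holds
# levels_count**3 evenly spaced RGB combinations; slicing [1:n + 1] drops the first
# entry (pure white, from level 1.0 on all three channels) and keeps exactly n colors,
# e.g. generate_colors(5) returns 5 distinct RGB triples built from levels {1.0, 0.5}.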
def human2machine_mask(mask, num2class_dict):
new_mask = np.zeros((mask.shape[0], mask.shape[1]))
colors = generate_colors(len(list(num2class_dict.keys())))
for i, c in enumerate(num2class_dict.keys()):
a = np.any((mask).astype(int) == list(colors[i]), axis=-1)
new_mask[a] = int(c)
return new_mask.astype('int8')
```
#### File: segmentation_image_augmentation/utils/mask2mask.py
```python
import numpy as np
from utils.colors import generate_colors
from utils.format_image import format_image
def semantic2binary(mask):
return format_image((mask > 0).max(axis=2))
def single2multi(mask):
return np.stack((mask,) * 3, axis=-1)
def semantic2binary_list(mask):
"""Input RGB image"""
unsqueezed_mask = mask.reshape(-1, mask.shape[2])
masks_colors = np.unique(unsqueezed_mask, axis=0)
background_index = np.argwhere(np.sum(masks_colors, axis=1) == 0)
masks_colors = np.delete(masks_colors, background_index, 0)
colors_count = masks_colors.shape[0]
masks = []
for i in range(colors_count):
masks.append(format_image((mask == masks_colors[i]).reshape(mask.shape)))
return masks
def binary_list2semantic(mask_list, colors=None):
main_mask = np.zeros_like(mask_list[0])
if colors is None:
colors = generate_colors(len(mask_list))
for i, mask in enumerate(mask_list):
main_mask[:, :, :3][mask[:, :, 0] > 0] = colors[i]
return format_image(main_mask)
def color_mask(mask, color):
m = (mask[:, :, 0] > 0) | (mask[:, :, 1] > 0) | (mask[:, :, 2] > 0)
new_mask = mask.copy()
new_mask[m] = color
return format_image(new_mask)
```
#### File: segmentation_image_augmentation/utils/pack_images.py
```python
import copy
import math
import numpy as np
# import rpack
from rectpack import newPacker
from rectpack.maxrects import MaxRectsBssf
def _change_dim_order(sizes):
return [[s[1], s[0]] for s in sizes]
# def get_pack_coords(sizes):
# # list of [height, width] i.e. img.shape order
# sizes = _change_dim_order(sizes)
# positions = rpack.pack(sizes)
# return _change_dim_order(positions)
def _pack(rectangles, bins):
packer = newPacker(pack_algo=MaxRectsBssf)
for r in rectangles:
packer.add_rect(*r)
for b in bins:
packer.add_bin(*b)
packer.pack()
all_rects = packer.rect_list()
res = []
for rect in all_rects:
res.append(np.array(rect))
res = np.array(res)
res.view('i8,i8,i8,i8,i8,i8,').sort(order=['f5'], axis=0)
res = [list(i) for i in res[:, 1:3]]
return res
def get_pack_coords(sizes):
s = copy.deepcopy(sizes)
[s[i].append(i + 1) for i in range(len(s))]
s = np.array([np.array(i) for i in s]).copy()
total_h, total_w, _ = s.sum(axis=0)
max_h = s[:, 0].max(axis=0)
virtual_cols = math.ceil(math.sqrt(len(sizes)))
height_limit = max(max_h, int(1.2 * (total_h / virtual_cols)))
rectangles = [tuple(i) for i in s]
bins = [(height_limit, total_w)]
coords = _pack(rectangles, bins)
if len(coords) != len(sizes):
coords = _pack(rectangles, [(int(2 * max_h), total_w)])
return coords
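# Hedged usage sketch (not part of the original file): packing three [height, width]
# rectangles; the ids appended in get_pack_coords and the final sort on field f5
# restore the input order, so one offset pair comes back per input rectangle.
# Exact values depend on the MaxRectsBssf packing.
if __name__ == "__main__":
    sizes = [[100, 200], [80, 150], [120, 90]]
    print(get_pack_coords(sizes))  # e.g. [[0, 0], [0, 200], [100, 0]] (packer dependent)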
```
#### File: segmentation_image_augmentation/utils/read.py
```python
import matplotlib.pyplot as plt
def read(path):
"""Reads an image from the specified path.
Args:
path (str): image path.
    Returns:
img (numpy.array): RGB image
"""
return plt.imread(path)[:, :, :3]
```
#### File: segmentation_image_augmentation/utils/supervisely2sia.py
```python
import os
import sys
import json
from pathlib import Path
import shutil
from utils import *
def supervisely2sia(init_data_folder, sia_data_folder):
'''
Prepares data collected in https://app.supervise.ly to work with SIA.
    Copies each class's images and machine masks into <sia_data_folder>/<class>
    with "_rgb" / "_label" filename suffixes.
'''
# with open(os.path.join(init_data_folder, 'obj_class_to_machine_color.json'), 'r') as file:
# data = file.read()
# class_colors_config = json.loads(data)
# objects = list(class_colors_config.keys())
# instances = [i for i in objects if 'Instance' in i]
classes = [i for i in os.listdir(init_data_folder) if os.path.isdir(os.path.join(init_data_folder, i))]
for c in classes:
Path(os.path.join(sia_data_folder, c)).mkdir(parents=True, exist_ok=True)
for i in get_images_list(os.path.join(init_data_folder, c, 'img')):
orig_file = i.split('/')[-1]
img_num, file_type = orig_file.split('.')
shutil.copy(i, os.path.join(sia_data_folder, c, img_num + '_rgb.' + file_type))
shutil.copy(i.replace('img', 'masks_machine').replace('jpg', 'png'),
os.path.join(sia_data_folder, c, img_num + '_label.png'))
``` |
{
"source": "1MT3J45/DS-StockAnalysis",
"score": 3
} |
#### File: 1MT3J45/DS-StockAnalysis/face.py
```python
from kivy.uix.boxlayout import BoxLayout
from kivy.lang import Builder
from kivy.app import App
import YP03
import sys
import dfgui
import pandas as pd
Builder.load_string('''
<faceTool>:
num1: num1
result: result
orientation: 'vertical'
BoxLayout:
orientation: 'horizontal'
Label:
id: num1
text: 'Stock Data Analysis'
BoxLayout:
orientation: 'horizontal'
GridLayout:
cols: 6
Label:
id: blank1
Label:
id: blank2
Button:
text: 'Execute'
height: 10
width: 30
on_press: root.display_fun(self)
Label:
text: 'EMPTY SLOT'
height: 10
width: 30
on_press:
Button:
text: "Show XLS Sheet"
height: 10
width: 30
on_press: root.graph()
Button:
text: "Clear"
height: 10
width: 30
on_press: root.clear_screen()
BoxLayout:
orientation: 'horizontal'
Label:
id: result
GridLayout:
cols: 2
size_hint_y: None
Button:
text: "Clear"
on_press: root.clear_screen()
height: 10
width: 30
BubbleButton:
text: 'Exit'
on_press: root.exit_it()
height: 10
width: 30
''')
class face_app(App):
def build(self):
return faceTool()
class faceTool(BoxLayout):
def __init__(self, **kwargs):
super(faceTool, self).__init__(**kwargs)
def display_fun(self, instance):
        '''Called when the Execute button is pressed: runs YP03.execute()
        and shows the returned day-cluster names, one per line, in the
        result label.
'''
DayClusterNames, length = YP03.execute()
res = ''
for i in range(len(DayClusterNames)):
res = str(DayClusterNames[i])+'\n'+res
self.result.text = str(res)
def exit_it(self):
sys.exit()
def graph(self):
# xls = pd.read_excel('Res.xls')
# df = pd.DataFrame(xls)
# dfgui.show(df)
import main
def clear_screen(self):
self.result.text = ''
face_app().run()
``` |
{
"source": "1MT3J45/ML-DroughtAnalysisNLP",
"score": 3
} |
#### File: 1MT3J45/ML-DroughtAnalysisNLP/freqWordSelection.py
```python
from __future__ import division
from __future__ import print_function
from __future__ import with_statement
from replacers import *
import pandas as pd
import nltk
import subprocess
def findFreqWord(fuzzyDF):
f1 = fuzzyDF # pd.read_csv("SubmittedCSV/fuzzy.csv")
f2 = pd.DataFrame(columns=['Tweets', 'Classified', 'FreqWord'])
    f3 = pd.read_csv("SubmittedCSV/fuzzyptag.csv")
pop_list = list(f3.iloc[:, 0])
for zero_cl_row in range(f1.__len__()):
row = 1
found = False
splitted_sentence = f1.iloc[zero_cl_row, 0].split()
print(splitted_sentence)
for tag in pop_list:
print("Popular tags:", pop_list)
for word in splitted_sentence:
if word in tag and f1.iloc[zero_cl_row, 1] == "Highly Positive":
f2 = f2.append(
{'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Highly Positive', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Highly Negative":
f2 = f2.append(
{'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Highly Negative', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Moderately Positive":
f2 = f2.append(
{'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Moderately Positive', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Moderately Negative":
f2 = f2.append(
{'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Moderately Negative', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Positive":
f2 = f2.append({'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Positive', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
elif word in tag and f1.iloc[zero_cl_row, 1] == "Negative":
f2 = f2.append({'Tweets': f1.iloc[zero_cl_row, 0], 'Classified': 'Negative', 'FreqWord': tag},
ignore_index=True)
found = True
row += 1
else:
print("Unmatched")
if not found:
print("NO")
f2.to_csv("SubmittedCSV/fuzzyfreq.csv", index=False)
try:
subprocess.call(['libreoffice','--calc','SubmittedCSV/fuzzyfreq.csv'])
except OSError:
print("Works with DEBIAN OS & LIBREOFFICE 5 only \n Use MS Excel or equivalent Software to open : "
"SubmittedCSV/fuzzyfreq.csv")
return f2
def pivotTable():
pass
# ---------------------------------- SUBMITTED LOGIC - TEST CASE
# ---------------------------------- #01 UNIT TESTING FAILED ##10, 11, 27, 30
# ---------------------------------- #02 LOGICAL GLITCH
# ---------------------------------- #03 COMPLIANCE MISUSE
# ---------------------------------- #04 MEMDUMP DETECTED
# ---------------------------------- #05 UNUSED OBJECTS, MEMORY BLOCK 0x0008
# for hosts_row in f1:
# row = 1
# found = False
# # t1=nltk.word_tokenize(hosts_row[0])
# t1 = hosts_row.split()
# print("t1=", t1)
# for master_row in pop_list:
# print("popular tags=", pop_list)
# for word in t1:
#
# if word == master_row[0] and hosts_row[1] == "Highly Positive":
# # >>> master_row[0] # Logical glitch, value uncompilable
# # 'b'
# f2.write(str(hosts_row[1]) + "," + word) # Will always look for 1st element of string
# # >>> hosts_row
# # ' neville rooney end ever tons trophy drought httpcocryingeyesjebfkdp,Positive\r\n'
# # >>> hosts_row[1]
# # 'n'
# found = True
# row = row + 1
#
# elif word == master_row[0] and hosts_row[1] == "Highly Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Moderately Positive":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Moderately Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Positive":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# # >>> master_row[0]
# # 'business'
# # >>> hosts_row[1]
# # 'n'
# found = True
# row = row + 1
# elif word == master_row[0] and hosts_row[1] == "Negative":
# f2.write(str(hosts_row[1]) + "," + str(master_row[0]))
# found = True
# row = row + 1
#
# # print count
# if not found:
# print("no")
#
# print(count)
# f1.close()
# f2.close()
```
#### File: 1MT3J45/ML-DroughtAnalysisNLP/SL_RanForGen.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def read_fit(data_frame):
# IMPORTING DATASET
dataset = data_frame
# Cleaning the Texts
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# Creating a Corpus
corpus = []
for i in range(0, dataset.__len__()):
review = re.sub('[^a-zA-Z]', ' ', str(dataset['tweet'][i]))
review = review.lower()
review = review.split()
ps = PorterStemmer()
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
corpus.append(review)
# Creating the Bag Of Words Model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features=144)
# Sparse Matrix -> CV
X = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, -1].values
# Splitting Data into Training & Testing
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Fitting Random Forest class to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=10, criterion='entropy', random_state=0)
classifier.fit(X_train, y_train)
return classifier, X_test, y_test
def rfg_spv_predict(machine, X_input, y_input):
X_test = X_input
y_test = y_input
classifier = machine
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
return cm
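# Illustrative end-to-end use of the two helpers above (the CSV name is hypothetical,
# not taken from this repository; the frame needs a 'tweet' column plus a label column last):
#   df = pd.read_csv('tweets_labelled.csv')
#   machine, X_test, y_test = read_fit(df)
#   confusion = rfg_spv_predict(machine, X_test, y_test)
#   print(confusion)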
``` |
{
"source": "1MT3J45/ML-RestaurantReviewAnalysis-NLP",
"score": 3
} |
#### File: 1MT3J45/ML-RestaurantReviewAnalysis-NLP/test.py
```python
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from multiprocessing.dummy import Pool as ThreadPool
import os
import pandas as pd
import nltk
import re
import spacy
import progressbar as bar
testB = pd.read_csv("CSV/Restaurants_Test_Data_phaseB.csv")
trainB = pd.read_csv("CSV/Restaurants_Train_v2.csv")
# print(testB.head(5))
# print(trainB.head(5))
trainB_1 = trainB.iloc[:, [0, 7, 5]]
testB_1 = testB.iloc[:, [0, 5, 4]]
fullB = pd.concat([testB_1, trainB_1], axis=0, ignore_index=True)
nltk.download('stopwords')
dataset = fullB # MAJOR DATA-SET
# --------------------- FUNCTIONS --------------------------
def check_dep_parse(token_dep):
    # Keep only the dependency relations useful for aspect/opinion extraction:
    # nominal subjects, adjectival/relative-clause modifiers, direct objects and negation.
    return token_dep.startswith(('nsub', 'amod', 'rcmod', 'dobj', 'neg'))
# --------------------- STREAM INITIALIZER ----------------------------
PoS_Tag_sent = list()
S1_corpus = [] # CORPUS (For Collecting Lemmas)
corpora = '' # CORPORA (For Collecting Corpora of single sentence)
S2_super_corpus = []  # CORPUS (For Collecting Bigrams sentence wise)
# --------------------- SPACY SPECS ------------------------
nlp_en = spacy.load('en_core_web_sm')
plot_nlp = 0 # For Plotting of Dependency chart
S3_dep_corpus = [] # CORPUS (For Collecting Dependency Relations)
# ---------------------------------------------------------- STREAM 1 - LEMMATIZATION
try:
for i in range(0, len(dataset)):
review = re.sub("[^a-zA-Z]", ' ', dataset['text'][i])
review = review.lower()
review = review.split()
# Learn More at https://www.quora.com/What-is-difference-between-stemming-and-lemmatization
ps = PorterStemmer()
nl = WordNetLemmatizer()
review = [ps.stem(nl.lemmatize(word, pos='v')) for word in review if not word in set(stopwords.words('english'))]
review = list(set(review))
S1_corpus.append(review)
bar.load(i, base=dataset, text='Stream 1')
print('Stream 1: Processed')
# print(S1_corpus)
# ----------------------------------------------------------- STREAM 2 - BIGRAMS
for i in range(len(dataset)):
sent = nltk.word_tokenize(dataset.iloc[i, 0].lower())
PoS_Tag_sent = nltk.pos_tag(sent)
for (w1, tag1), (w2, tag2) in nltk.bigrams(PoS_Tag_sent):
if tag1.startswith('JJ') and tag2.startswith('NN'): # R1
corpora += w1+' '+w2+';'
elif tag1.startswith('RB') and tag2.startswith('JJ'): # R2
corpora += w1+' '+w2+';'
elif tag1.startswith('JJ') and tag2.startswith('JJ'): # R3
corpora += w1+' '+w2+';'
elif tag1.startswith('NN') and tag2.startswith('JJ'): # R4
corpora += w1+' '+w2+';'
elif tag1.startswith('RB') and tag2.startswith('VB'): # R5
corpora += w1+' '+w2+';'
elif tag1.startswith('VB') and tag2.startswith('NN'): # R6
corpora += w1+' '+w2+';'
elif tag1.startswith('JJ') and tag2.startswith('VB'): # R7
corpora += w1+' '+w2+';'
elif tag1.startswith('RB') and tag2.startswith('RB'): # R8
corpora += w1+' '+w2+';'
elif tag1.startswith('RB') and tag2.startswith('VB'): # R9
corpora += w1+' '+w2+';'
S2_super_corpus.append(corpora)
corpora = ''
bar.load(i, base=dataset, text='Stream 2')
print('Stream 2: Processed')
except KeyboardInterrupt:
print("Terminating. Human Intervention Not Allowed")
exit(0)
# ----------------------------------------------------------- STREAM 3 - DEPENDENCY FEATURES (spaCy)
try:
for increment in range(len(dataset)):
sentence = dataset.iloc[increment, 0].lower()
#print(increment)
for token in nlp_en(sentence):
dep = check_dep_parse(token.dep_)
if dep is True:
# print(token.dep_, end="> ")
# print(token.head, token)
corpora += str(token.head)+' '+str(token)+';'
else:
pass
S3_dep_corpus.append(corpora)
corpora = ''
bar.load(increment,base=dataset, text='Stream 3')
print('Stream 3: Processed')
plot_nlp = nlp_en(sentence)
pass
except TypeError as e:
print("Unexpected Termination:", e)
exit(0)
except KeyboardInterrupt:
print("Human Interrupt Received! Exiting...")
exit(0)
stream1 = pd.Series(S1_corpus)
stream2 = pd.Series(S2_super_corpus)
stream3 = pd.Series(S3_dep_corpus)
df = pd.concat([stream1, stream2, stream3], axis=1)
df = df.rename(columns={0: 'lemmas', 1: 'bigrams', 2: 'depenrel'})
df.to_csv('FeatureSet.csv', index=False)
df = pd.read_csv('FeatureSet.csv', sep=',')
try:
    # displacy.serve() blocks, so fire the browser off on a worker thread first
    pool = ThreadPool(2)
    pool.apply_async(os.system, ('firefox localhost:5000 &',))
    spacy.displacy.serve(plot_nlp, style='dep')
    exit(0)
except OSError:
print("Browser must start with Graph. If doesn't please make sure to use Ubuntu with Firefox")
except TypeError:
print("Browser must start with Graph. If doesn't please make sure to use Ubuntu with Firefox")
# # Set up spaCy
# from spacy.lang.en import English
# parser = English()
#
# # Test Data
# multiSentence = "There is an art, it says, or rather, a knack to flying." \
# "The knack lies in learning how to throw yourself at the ground and miss." \
# "In the beginning the Universe was created. This has made a lot of people "\
# "very angry and been widely regarded as a bad move."
#
# # all you have to do to parse text is this:
# #note: the first time you run spaCy in a file it takes a little while to load up its modules
# parsedData = parser(multiSentence)
#
# # Let's look at the tokens
# # All you have to do is iterate through the parsedData
# # Each token is an object with lots of different properties
# # A property with an underscore at the end returns the string representation
# # while a property without the underscore returns an index (int) into spaCy's vocabulary
# # The probability estimate is based on counts from a 3 billion word
# # corpus, smoothed using the Simple Good-Turing method.
# for i, token in enumerate(parsedData):
# print("original:", token.orth, token.orth_)
# print("lowercased:", token.lower, token.lower_)
# print("lemma:", token.lemma, token.lemma_)
# print("shape:", token.shape, token.shape_)
# print("prefix:", token.prefix, token.prefix_)
# print("suffix:", token.suffix, token.suffix_)
# print("log probability:", token.prob)
# print("Brown cluster id:", token.cluster)
# print("----------------------------------------")
# if i > 1:
# break
#
# # Let's look at the sentences
# sents = []
# parsedData.is_parsed = True
# # the "sents" property returns spans
# # spans have indices into the original string
# # where each index value represents a token
# for span in parsedData.sents:
# # go from the start to the end of each span, returning each token in the sentence
# # combine each token using join()
# sent = ''.join(parsedData[i].string for i in range(span.start, span.end)).strip()
# sents.append(sent)
#
# for sentence in sents:
# print(sentence)
#
# # Let's look at the part of speech tags of the first sentence
# for span in parsedData.sents:
# sent = [parsedData[i] for i in range(span.start, span.end)]
# break
#
# for token in sent:
# print(token.orth_, token.pos_)
# # ----------------------------------------------------
# import spacy
# nlp = spacy.load("en")
#
# document = "There is an art, it says, or rather, a knack to flying." \
# "The knack lies in learning how to throw yourself at the ground and miss." \
# "In the beginning the Universe was created. This has made a lot of people "\
# "very angry and been widely regarded as a bad move."
#
# document = nlp(document)
# dir(document)
#
# # Getting sentences:
# list(document.sents)
#
# # Part of Speech tagging
# all_tags = {w.pos: w.pos_ for w in document}
#
# # All tags of first sentence of our document
# for word in list(document.sents)[0]:
# print(word, word.tag_)
#
# # Defining some parameters
# noisy_pos_tags = ["PROP"]
# min_token_length = 2
#
# # Dependency Parsing
# # Extracting all review sentences that contains the term 'art'
# art = [sent for sent in document.sents if 'art' in sent.string.lower()]
#
# # create dependency tree
# sentence = art[0]
# for word in sentence:
# print(word, ': ', str(list(word.children)))
``` |
{
"source": "1Nathane/pythonbirds",
"score": 4
} |
#### File: pythonbirds/oo/pessoa.py
```python
class Pessoa:
olhos = 2
    def __init__(self, *filhos, nome=None, idade=50):
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
def cumprimentar(self):
return f'Olá, meu nome é {self.nome}'
@staticmethod
def metodo_estatico():
return 45
@classmethod
def nome_e_atributos_de_classe(cls):
return f'{cls} - olhos {cls.olhos}'
class Homem(Pessoa):
def cumprimentar(self):
cumprimentar_da_classe = super().cumprimentar()
return f'{cumprimentar_da_classe}. Aperto de mão'
class Mutante(Pessoa):
olhos = 3
if __name__ == '__main__':
nathane = Mutante(nome = 'Nathane', idade = 27)
layane = Homem(nome = 'Layane', idade = 27)
rafael = Pessoa(nome = 'Rafael', idade = 18)
luciano = Homem(nathane,rafael,layane, nome = 'Luciano')
print(Pessoa.cumprimentar(luciano))
print(id(luciano))
    print(luciano.cumprimentar())  # Same expression as on line 15
print(f'Nome: {luciano.nome}')
print(f'Idade: {luciano.idade}')
print(f'Filhos de {luciano.nome}:')
    for filho in luciano.filhos:  # Needed because luciano's children are objects of the Pessoa class
print(filho.nome)
print(Pessoa.metodo_estatico(), nathane.metodo_estatico())
print(Pessoa.nome_e_atributos_de_classe(),luciano.nome_e_atributos_de_classe())
pessoa = Pessoa('Anonimo')
print(isinstance(pessoa, Pessoa))
print(isinstance(pessoa, Homem))
print(isinstance(nathane, Homem))
print(isinstance(nathane, Pessoa))
print(nathane.olhos)
print(luciano.cumprimentar())
print(nathane.cumprimentar())
``` |
{
"source": "1NCE-GmbH/blueprint-pycom",
"score": 2
} |
#### File: lib/AWSIoTPythonSDK/MQTTClient.py
```python
import lib.AWSIoTPythonSDK.MQTTMsgHandler as msgHandler
import lib.AWSIoTPythonSDK.MQTTConst as mqttConst
import time
import struct
import _thread
class MQTTMessage:
def __init__(self):
self.timestamp = 0
self.state = 0
self.dup = False
self.mid = 0
self.topic = ""
self.payload = None
self.qos = 0
self.retain = False
class MQTTClient:
def __init__(self, clientID, cleanSession, protocol):
self.client_id = clientID
self._cleanSession = cleanSession
self._protocol = protocol
self._userdata = None
self._user = ""
self._password = ""
self._keepAliveInterval = 60
self._will = False
self._will_topic = ""
self._will_message = None
self._will_qos = 0
self._will_retain = False
self._connectdisconnectTimeout = 30
self._mqttOperationTimeout = 5
self._topic_callback_queue = []
self._callback_mutex = _thread.allocate_lock()
self._pid = 0
self._subscribeSent = False
self._unsubscribeSent = False
self._baseReconnectTimeSecond = 1
self._maximumReconnectTimeSecond = 32
self._minimumConnectTimeSecond = 20
self._msgHandler = msgHandler.MsgHandler(self._recv_callback)
def getClientID(self):
return self.client_id
def configEndpoint(self, srcHost, srcPort):
self._msgHandler.setEndpoint(srcHost, srcPort)
def configCredentials(self, srcCAFile, srcKey, srcCert):
self._msgHandler.setCredentials(srcCAFile, srcKey, srcCert)
def setConnectDisconnectTimeoutSecond(self, srcConnectDisconnectTimeout):
self._connectdisconnectTimeout = srcConnectDisconnectTimeout
def setMQTTOperationTimeoutSecond(self, srcMQTTOperationTimeout):
self._mqttOperationTimeout = srcMQTTOperationTimeout
self._msgHandler.setOperationTimeout(srcMQTTOperationTimeout)
def clearLastWill(self):
self._will = False
self._will_topic = ""
self._will_message = None
self._will_qos = 0
self._will_retain = False
def setLastWill(self, topic, payload=None, QoS=0, retain=False):
self._will = True
self._will_qos = QoS
self._will_retain = retain
self._will_topic = topic.encode('utf-8')
if isinstance(payload, bytearray):
self._will_message = payload
elif isinstance(payload, str):
self._will_message = payload.encode('utf-8')
elif isinstance(payload, int) or isinstance(payload, float):
self._will_message = str(payload)
def configIAMCredentials(self, srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken):
raise NotImplementedError('Websockets not supported')
def setOfflinePublishQueueing(self, srcQueueSize, srcDropBehavior):
if srcDropBehavior != mqttConst.DROP_OLDEST and srcDropBehavior != mqttConst.DROP_NEWEST:
raise ValueError("Invalid packet drop behavior")
self._msgHandler.setOfflineQueueConfiguration(srcQueueSize, srcDropBehavior)
def setDrainingIntervalSecond(self, srcDrainingIntervalSecond):
self._msgHandler.setDrainingInterval(srcDrainingIntervalSecond)
def setBackoffTiming(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond):
self._baseReconnectTimeSecond = srcBaseReconnectTimeSecond
self._maximumReconnectTimeSecond = srcMaximumReconnectTimeSecond
self._minimumConnectTimeSecond = srcMinimumConnectTimeSecond
def connect(self, keepAliveInterval=30):
self._keepAliveInterval = keepAliveInterval
if not self._msgHandler.createSocketConnection():
return False
self._send_connect(self._keepAliveInterval, self._cleanSession)
# delay to check the state
count_10ms = 0
while (count_10ms <= self._connectdisconnectTimeout * 100 and not self._msgHandler.isConnected()):
count_10ms += 1
time.sleep(0.01)
return True if self._msgHandler.isConnected() else False
def subscribe(self, topic, qos, callback):
if (topic is None or callback is None):
raise TypeError("Invalid subscribe values.")
topic = topic.encode('utf-8')
header = mqttConst.MSG_SUBSCRIBE | (1 << 1)
pkt = bytearray([header])
pkt_len = 2 + 2 + len(topic) + 1 # packet identifier + len of topic (16 bits) + topic len + QOS
pkt.extend(self._encode_varlen_length(pkt_len)) # len of the remaining
self._pid += 1
pkt.extend(self._encode_16(self._pid))
pkt.extend(self._pascal_string(topic))
pkt.append(qos)
self._subscribeSent = False
self._msgHandler.push_on_send_queue(pkt)
count_10ms = 0
while (count_10ms <= self._mqttOperationTimeout * 100 and not self._subscribeSent):
count_10ms += 1
time.sleep(0.01)
if self._subscribeSent:
self._callback_mutex.acquire()
self._topic_callback_queue.append((topic, callback))
self._callback_mutex.release()
return True
return False
def publish(self, topic, payload, qos, retain, dup=False):
topic = topic.encode('utf-8')
payload = payload.encode('utf-8')
header = mqttConst.MSG_PUBLISH | (dup << 3) | (qos << 1) | retain
pkt_len = (2 + len(topic) +
(2 if qos else 0) +
(len(payload)))
pkt = bytearray([header])
pkt.extend(self._encode_varlen_length(pkt_len)) # len of the remaining
pkt.extend(self._pascal_string(topic))
if qos:
self._pid += 1 # todo: I don't think this is the way to deal with the packet id
pkt.extend(self._encode_16(self._pid))
pkt = pkt + payload
self._msgHandler.push_on_send_queue(pkt)
def _encode_16(self, x):
return struct.pack("!H", x)
def _pascal_string(self, s):
return struct.pack("!H", len(s)) + s
def _encode_varlen_length(self, length):
i = 0
buff = bytearray()
while 1:
buff.append(length % 128)
length = length // 128
if length > 0:
buff[i] = buff[i] | 0x80
i += 1
else:
break
return buff
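    # Subscription matching with MQTT wildcards: '+' matches exactly one topic level
    # (e.g. 'a/+/c' matches 'a/b/c') and '#' matches all remaining levels ('a/#' matches 'a/b/c/d').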
def _topic_matches_sub(self, sub, topic):
result = True
multilevel_wildcard = False
slen = len(sub)
tlen = len(topic)
if slen > 0 and tlen > 0:
if (sub[0] == '$' and topic[0] != '$') or (topic[0] == '$' and sub[0] != '$'):
return False
spos = 0
tpos = 0
while spos < slen and tpos < tlen:
if sub[spos] == topic[tpos]:
if tpos == tlen - 1:
# Check for e.g. foo matching foo/#
if spos == slen - 3 and sub[spos + 1] == '/' and sub[spos + 2] == '#':
result = True
multilevel_wildcard = True
break
spos += 1
tpos += 1
if tpos == tlen and spos == slen - 1 and sub[spos] == '+':
spos += 1
result = True
break
else:
if sub[spos] == '+':
spos += 1
while tpos < tlen and topic[tpos] != '/':
tpos += 1
if tpos == tlen and spos == slen:
result = True
break
elif sub[spos] == '#':
multilevel_wildcard = True
if spos + 1 != slen:
result = False
break
else:
result = True
break
else:
result = False
break
if not multilevel_wildcard and (tpos < tlen or spos < slen):
result = False
return result
def _remove_topic_callback(self, topic):
deleted = False
self._callback_mutex.acquire()
for i in range(0, len(self._topic_callback_queue)):
if self._topic_callback_queue[i][0] == topic:
self._topic_callback_queue.pop(i)
deleted = True
self._callback_mutex.release()
return deleted
def unsubscribe(self, topic):
self._unsubscribeSent = False
self._send_unsubscribe(topic, False)
count_10ms = 0
while (count_10ms <= self._mqttOperationTimeout * 100 and not self._unsubscribeSent):
count_10ms += 1
time.sleep(0.01)
if self._unsubscribeSent:
topic = topic.encode('utf-8')
return self._remove_topic_callback(topic)
return False
def disconnect(self):
pkt = struct.pack('!BB', mqttConst.MSG_DISCONNECT, 0)
self._msgHandler.push_on_send_queue(pkt)
time.sleep(self._connectdisconnectTimeout)
self._msgHandler.disconnect()
return True
def _send_connect(self, keepalive, clean_session):
msg_sent = False
pkt_len = (12 + len(self.client_id) + # 10 + 2 + len(client_id)
(2 + len(self._user) if self._user else 0) +
(2 + len(self._password) if self._password else 0))
flags = (0x80 if self._user else 0x00) | (0x40 if self._password else 0x00) | (0x02 if clean_session else 0x00)
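        # CONNECT flags byte: bit 7 user name, bit 6 password, bit 1 clean session;
        # the will flag/QoS/retain bits (2-5) are OR-ed in below when a last will is set.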
if self._will_message:
flags |= (self._will_retain << 3 | self._will_qos << 1 | 1) << 2
pkt_len += 4 + len(self._will_topic) + len(self._will_message)
pkt = bytearray([mqttConst.MSG_CONNECT]) # connect
pkt.extend(self._encode_varlen_length(pkt_len)) # len of the remaining
pkt.extend(b'\x00\x04MQTT\x04') # len of "MQTT" (16 bits), protocol name, and protocol version
pkt.append(flags)
pkt.extend(b'\x00\x00') # disable keepalive
pkt.extend(self._pascal_string(self.client_id))
if self._will_message:
pkt.extend(self._pascal_string(self._will_topic))
pkt.extend(self._pascal_string(self._will_message))
if self._user:
pkt.extend(self._pascal_string(self._user))
if self._password:
pkt.extend(self._pascal_string(self._password))
return self._msgHandler.priority_send(pkt)
def _send_unsubscribe(self, topic, dup=False):
pkt = bytearray()
msg_type = mqttConst.MSG_UNSUBSCRIBE | (dup << 3) | (1 << 1)
pkt.extend(struct.pack("!B", msg_type))
remaining_length = 2 + 2 + len(topic)
pkt.extend(self._encode_varlen_length(remaining_length))
self._pid += 1
pkt.extend(self._encode_16(self._pid))
pkt.extend(self._pascal_string(topic))
return self._msgHandler.push_on_send_queue(pkt)
def _send_puback(self, msg_id):
remaining_length = 2
pkt = struct.pack('!BBH', mqttConst.MSG_PUBACK, remaining_length, msg_id)
return self._msgHandler.push_on_send_queue(pkt)
def _send_pubrec(self, msg_id):
remaining_length = 2
pkt = struct.pack('!BBH', mqttConst.MSG_PUBREC, remaining_length, msg_id)
return self._msgHandler.push_on_send_queue(pkt)
def _parse_connack(self, payload):
if len(payload) != 2:
return False
(flags, result) = struct.unpack("!BB", payload)
if result == 0:
self._msgHandler.setConnectionState(mqttConst.STATE_CONNECTED)
return True
else:
self._msgHandler.setConnectionState(mqttConst.STATE_DISCONNECTED)
return False
def _parse_suback(self, payload):
self._subscribeSent = True
print('Subscribed to topic')
return True
def _parse_puback(self, payload):
return True
def _notify_message(self, message):
notified = False
self._callback_mutex.acquire()
for t_obj in self._topic_callback_queue:
if self._topic_matches_sub(t_obj[0], message.topic):
t_obj[1](self, self._userdata, message)
notified = True
self._callback_mutex.release()
return notified
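    # PUBLISH fixed header: DUP is bit 3, QoS bits 1-2, RETAIN bit 0 of the first byte;
    # the variable header carries the length-prefixed topic and, for QoS > 0, a packet id.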
def _parse_publish(self, cmd, packet):
msg = MQTTMessage()
msg.dup = (cmd & 0x08) >> 3
msg.qos = (cmd & 0x06) >> 1
msg.retain = (cmd & 0x01)
pack_format = "!H" + str(len(packet) - 2) + 's'
(slen, packet) = struct.unpack(pack_format, packet)
pack_format = '!' + str(slen) + 's' + str(len(packet) - slen) + 's'
(msg.topic, packet) = struct.unpack(pack_format, packet)
if len(msg.topic) == 0:
return False
if msg.qos > 0:
pack_format = "!H" + str(len(packet) - 2) + 's'
(msg.mid, packet) = struct.unpack(pack_format, packet)
msg.payload = packet
if msg.qos == 0:
self._notify_message(msg)
elif msg.qos == 1:
self._send_puback(msg.mid)
self._notify_message(msg)
elif msg.qos == 2:
self._send_pubrec(msg.mid)
self._notify_message(msg)
else:
return False
return True
def _parse_unsuback(self, payload):
self._unsubscribeSent = True
return True
def _parse_pingresp(self):
self._msgHandler.setPingFlag(True)
return True
def _recv_callback(self, cmd, payload):
msg_type = cmd & 0xF0
if msg_type == mqttConst.MSG_CONNACK:
return self._parse_connack(payload)
elif msg_type == mqttConst.MSG_SUBACK:
return self._parse_suback(payload)
elif msg_type == mqttConst.MSG_PUBACK:
return self._parse_puback(payload)
elif msg_type == mqttConst.MSG_PUBLISH:
return self._parse_publish(cmd, payload)
elif msg_type == mqttConst.MSG_UNSUBACK:
return self._parse_unsuback(payload)
elif msg_type == mqttConst.MSG_PINGRESP:
return self._parse_pingresp()
else:
print('Unknown message type: %d' % msg_type)
return False
def insertShadowCallback(self, callback, payload, status, token):
self._msgHandler.insertShadowCallback(callback, payload, status, token)
```
#### File: lib/microcoapy/coap_packet.py
```python
import lib.microcoapy.coap_macros as macros
from lib.microcoapy.coap_option import CoapOption
class CoapPacket:
def __init__(self):
self.version = macros.COAP_VERSION.COAP_VERSION_UNSUPPORTED
self.type = macros.COAP_TYPE.COAP_CON # uint8_t
self.method = macros.COAP_METHOD.COAP_GET # uint8_t
self.token = bytearray()
self.payload = bytearray()
self.message_id = 0
self.content_format = macros.COAP_CONTENT_FORMAT.COAP_NONE
self.query = bytearray() # uint8_t*
self.options = []
def add_option(self, number, opt_payload):
if len(self.options) >= macros.MAX_OPTION_NUM:
return
self.options.append(CoapOption(number, opt_payload))
def set_uri_host(self, address):
self.add_option(macros.COAP_OPTION_NUMBER.COAP_URI_HOST, address)
def set_uri_path(self, url):
for subPath in url.split('/'):
self.add_option(macros.COAP_OPTION_NUMBER.COAP_URI_PATH, subPath)
def to_string(self):
class_, detail = macros.CoapResponseCode.decode(self.method)
return "type: {}, method: {}.{:02d}, messageid: {}, payload: {}".format(
macros.coap_type_to_string(self.type),
class_, detail,
self.message_id,
self.payload)
```
#### File: lib/microcoapy/microcoapy.py
```python
import binascii
import uos
import usocket as socket
import utime as time
import lib.microcoapy.coap_macros as macros
from lib.microcoapy.coap_packet import CoapPacket
from lib.microcoapy.coap_reader import parse_packet_header_info
from lib.microcoapy.coap_reader import parse_packet_options_and_payload
from lib.microcoapy.coap_writer import write_packet_header_info
from lib.microcoapy.coap_writer import write_packet_options
from lib.microcoapy.coap_writer import write_packet_payload
class Coap:
TRANSMISSION_STATE = macros.enum(
STATE_IDLE=0,
STATE_SEPARATE_ACK_RECEIVED_WAITING_DATA=1
)
def __init__(self):
self.debug = True
self.sock = None
self.callbacks = {}
self.response_callback = None
self.port = 0
self.isServer = False
self.state = self.TRANSMISSION_STATE.STATE_IDLE
self.isCustomSocket = False
# beta flags
self.discardRetransmissions = False
self.lastPacketStr = ""
def log(self, s):
if self.debug:
print("[microcoapy]: " + s)
# Create and initialize a new UDP socket to listen to.
# port: the local port to be used.
def start(self, port=macros.COAP_DEFAULT_PORT):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('', port))
# Stop and destroy the socket that has been created by
# a previous call of 'start' function
def stop(self):
if self.sock is not None:
self.sock.close()
self.sock = None
# Set a custom instance of a UDP socket
# Is used instead of calling start/stop functions.
#
# Note: This overrides the automatic socket that has been created
# by the 'start' function.
# The custom socket must support functions:
# * socket.sendto(bytes, address)
# * socket.recvfrom(bufsize)
# * socket.setblocking(flag)
def set_custom_socket(self, custom_socket):
self.stop()
self.isCustomSocket = True
self.sock = custom_socket
def add_incoming_request_callback(self, request_url, callback):
self.callbacks[request_url] = callback
self.isServer = True
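    # Serialise the packet (header, options, payload) and send it as a single UDP datagram;
    # when set, the Content-Format option is appended here as two big-endian bytes.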
def send_packet(self, ip, port, coap_packet):
if coap_packet.content_format != macros.COAP_CONTENT_FORMAT.COAP_NONE:
option_buffer = bytearray(2)
option_buffer[0] = (coap_packet.content_format & 0xFF00) >> 8
option_buffer[1] = (coap_packet.content_format & 0x00FF)
coap_packet.add_option(macros.COAP_OPTION_NUMBER.COAP_CONTENT_FORMAT, option_buffer)
if (coap_packet.query is not None) and (len(coap_packet.query) > 0):
coap_packet.add_option(macros.COAP_OPTION_NUMBER.COAP_URI_QUERY, coap_packet.query)
buffer = bytearray()
write_packet_header_info(buffer, coap_packet)
write_packet_options(buffer, coap_packet)
write_packet_payload(buffer, coap_packet)
status = 0
try:
socket_address = (ip, port)
try:
socket_address = socket.getaddrinfo(ip, port)[0][-1]
except Exception as e:
pass
status = self.sock.sendto(buffer, socket_address)
if status > 0:
status = coap_packet.message_id
self.log('Packet sent. messageid: ' + str(status))
except Exception as e:
status = 0
print('Exception while sending packet...')
import sys
sys.print_exception(e)
return status
def send(self, ip, port, url, packet_type, method, token, payload, content_format, query_option):
packet = CoapPacket()
packet.type = packet_type
packet.method = method
packet.token = token
packet.payload = payload
packet.content_format = content_format
packet.query = query_option
return self.send_ex(ip, port, url, packet)
def send_ex(self, ip, port, url, packet):
self.state = self.TRANSMISSION_STATE.STATE_IDLE
# messageId field: 16bit -> 0-65535
# urandom to generate 2 bytes
random_bytes = uos.urandom(2)
packet.message_id = (random_bytes[0] << 8) | random_bytes[1]
packet.set_uri_host(ip)
packet.set_uri_path(url)
return self.send_packet(ip, port, packet)
# to be tested
def send_response(self, ip, port, message_id, payload, method, content_format, token):
packet = CoapPacket()
packet.type = macros.COAP_TYPE.COAP_ACK
packet.method = method
packet.token = token
packet.payload = payload
packet.message_id = message_id
packet.content_format = content_format
return self.send_packet(ip, port, packet)
def get(self, ip, port, url, token=bytearray()):
return self.send(ip, port, url, macros.COAP_TYPE.COAP_CON, macros.COAP_METHOD.COAP_GET, token, None,
macros.COAP_CONTENT_FORMAT.COAP_NONE, None)
def put(self, ip, port, url, payload=bytearray(), query_option=None,
content_format=macros.COAP_CONTENT_FORMAT.COAP_NONE, token=bytearray()):
return self.send(ip, port, url, macros.COAP_TYPE.COAP_CON, macros.COAP_METHOD.COAP_PUT, token, payload,
content_format, query_option)
def post(self, ip, port, url, payload=bytearray(), query_option=None,
content_format=macros.COAP_CONTENT_FORMAT.COAP_NONE, token=bytearray()):
return self.send(ip, port, url, macros.COAP_TYPE.COAP_CON, macros.COAP_METHOD.COAP_POST, token, payload,
content_format, query_option)
def get_non_confirmable(self, ip, port, url, token=bytearray()):
return self.send(ip, port, url, macros.COAP_TYPE.COAP_NONCON, macros.COAP_METHOD.COAP_GET, token, None,
macros.COAP_CONTENT_FORMAT.COAP_NONE, None)
def put_non_confirmable(self, ip, port, url, payload=bytearray(), query_option=None,
content_format=macros.COAP_CONTENT_FORMAT.COAP_NONE, token=bytearray()):
return self.send(ip, port, url, macros.COAP_TYPE.COAP_NONCON, macros.COAP_METHOD.COAP_PUT, token, payload,
content_format, query_option)
def post_non_confirmable(self, ip, port, url, payload=bytearray(), query_option=None,
content_format=macros.COAP_CONTENT_FORMAT.COAP_NONE, token=bytearray()):
return self.send(ip, port, url, macros.COAP_TYPE.COAP_NONCON, macros.COAP_METHOD.COAP_POST, token, payload,
content_format, query_option)
def handle_incoming_request(self, request_packet, source_ip, source_port):
url = ""
for opt in request_packet.options:
if (opt.number == macros.COAP_OPTION_NUMBER.COAP_URI_PATH) and (len(opt.buffer) > 0):
if url != "":
url += "/"
url += opt.buffer.decode('unicode_escape')
url_callback = None
if url != "":
url_callback = self.callbacks.get(url)
if url_callback is None:
print('Callback for url [', url, "] not found")
self.send_response(source_ip, source_port, request_packet.message_id,
None, macros.COAP_RESPONSE_CODE.COAP_NOT_FOUND,
macros.COAP_CONTENT_FORMAT.COAP_NONE, request_packet.token)
else:
url_callback(request_packet, source_ip, source_port)
def read_bytes_from_socket(self, num_of_bytes):
try:
return self.sock.recvfrom(num_of_bytes)
except Exception:
return None, None
def parse_packet_token(self, buffer, packet):
if packet.tokenLength == 0:
packet.token = None
elif packet.tokenLength <= 8:
packet.token = buffer[4:4 + packet.tokenLength]
else:
(tempBuffer, tempRemoteAddress) = self.read_bytes_from_socket(macros.BUF_MAX_SIZE - len(buffer))
if tempBuffer is not None:
buffer.extend(tempBuffer)
return False
return True
def loop(self, blocking=True):
if self.sock is None:
return False
self.sock.setblocking(blocking)
(buffer, remoteAddress) = self.read_bytes_from_socket(macros.BUF_MAX_SIZE)
self.sock.setblocking(True)
while (buffer is not None) and (len(buffer) > 0):
buffer_len = len(buffer)
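            # The top two bits of the first byte carry the CoAP version and must equal 1;
            # anything shorter than the 4-byte header is a partial datagram, so read more bytes.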
if (buffer_len < macros.COAP_HEADER_SIZE) or (((buffer[0] & 0xC0) >> 6) != 1):
(tempBuffer, tempRemoteAddress) = self.read_bytes_from_socket(macros.BUF_MAX_SIZE - buffer_len)
if tempBuffer is not None:
buffer.extend(tempBuffer)
continue
packet = CoapPacket()
self.log("Incoming Packet bytes: " + str(binascii.hexlify(bytearray(buffer))))
parse_packet_header_info(buffer, packet)
if not self.parse_packet_token(buffer, packet):
continue
if not parse_packet_options_and_payload(buffer, packet):
return False
# beta functionality
if self.discardRetransmissions:
if packet.to_string() == self.lastPacketStr:
self.log("Discarded retransmission message: " + packet.to_string())
return False
else:
self.lastPacketStr = packet.to_string()
####
if self.isServer:
self.handle_incoming_request(packet, remoteAddress[0], remoteAddress[1])
else:
# To handle cases of Separate response (rfc7252 #5.2.2)
if packet.type == macros.COAP_TYPE.COAP_ACK and \
packet.method == macros.COAP_METHOD.COAP_EMPTY_MESSAGE:
self.state = self.TRANSMISSION_STATE.STATE_SEPARATE_ACK_RECEIVED_WAITING_DATA
return False
# case of piggybacked response where the response is in the ACK (rfc7252 #5.2.1)
# or the data of a separate message
else:
if self.state == self.TRANSMISSION_STATE.STATE_SEPARATE_ACK_RECEIVED_WAITING_DATA:
self.state = self.TRANSMISSION_STATE.STATE_IDLE
self.send_response(remoteAddress[0], remoteAddress[1], packet.message_id,
None, macros.COAP_TYPE.COAP_ACK,
macros.COAP_CONTENT_FORMAT.COAP_NONE, packet.token)
if self.response_callback is not None:
self.response_callback(packet, remoteAddress)
return True
return False
def poll(self, timeout_ms=-1, poll_period_ms=500):
start_time = time.ticks_ms()
status = False
while not status:
status = self.loop(False)
if time.ticks_diff(time.ticks_ms(), start_time) >= timeout_ms:
break
time.sleep_ms(poll_period_ms)
return status
def set_response_callback(self, callback):
self.response_callback = callback
```
#### File: onboarding/helper/file_helper.py
```python
class FileHelper:
@staticmethod
def write_file(content, path):
"""
Writes the content to a file
:param content: Content that needs to be written to the file
:param path: File path
"""
with open(path, "wb") as file:
file.write(content)
```
#### File: nce/translator/message_service.py
```python
import ustruct as struct
class ValueType:
STRING = "string"
CHAR = "char"
DOUBLE = "double"
FLOAT = "float"
INT = "int"
UINT = "uint"
SHORT = "short"
BOOLEAN = "boolean"
def fill_bytes(byte_array, start, end, value, value_type):
    if value_type == ValueType.CHAR and value is not None and value != '':
        byte_array[start:end] = struct.pack("s", value)
    elif value_type == ValueType.STRING and value is not None and value != '':
        byte_array[start:end] = struct.pack("{}s".format(end - start), value)
    elif value_type == ValueType.DOUBLE and value is not None and value != '':
        byte_array[start:end] = struct.pack("d", float(value))
    elif value_type == ValueType.FLOAT and value is not None and value != '':
        byte_array[start:end] = struct.pack("f", float(value))
    elif value_type == ValueType.INT and value is not None and value != '':
        byte_array[start:end] = struct.pack("i", int(value))
    elif value_type == ValueType.UINT and value is not None and value != '':
        byte_array[start:end] = struct.pack("I", int(value))
    elif value_type == ValueType.SHORT and value is not None and value != '':
        byte_array[start:end] = struct.pack("h", int(value))
return byte_array
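# Illustrative packing of a small payload (the offsets and field layout below are made up,
# not taken from any 1NCE translation template):
#   buf = bytearray(8)
#   buf = fill_bytes(buf, 0, 4, "21.5", ValueType.FLOAT)  # 4-byte float at bytes 0-3
#   buf = fill_bytes(buf, 4, 8, "1024", ValueType.INT)    # 4-byte int at bytes 4-7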
``` |
{
"source": "1ncend1ary/Habitican-Curse",
"score": 3
} |
#### File: Habitican-Curse/habitican_curse/config.py
```python
import curses
import datetime
import os
NUM_CONTEXT_REGISTERS = 4
# Screen Specifications - Will be adjusted during runtime
SCR_MAX_MENU_ROWS = 22 # Keep this as an even number please
SCR_X = 0
SCR_Y = 0
SCR_MENU_ITEM_WIDTH = 0
SCR_FIRST_HALF_LENGTH = 0 # Space taken up by the upper half menu of tasks
SCR_TEXT_AREA_LENGTH = 0
# Colors
SCR_COLOR_RED = 2
SCR_COLOR_GREEN = 3
SCR_COLOR_YELLOW = 4
SCR_COLOR_BLUE = 5
SCR_COLOR_WHITE = 6
SCR_COLOR_MAGENTA = 7
SCR_COLOR_CYAN = 14
SCR_COLOR_LIGHT_ORANGE = 8
SCR_COLOR_DARK_ORANGE = 9
SCR_COLOR_DARK_GRAY = 15
SCR_COLOR_LIGHT_GRAY = 16
# Display Colors with Background
SCR_COLOR_WHITE_GRAY_BGRD = 12
SCR_COLOR_GRAY_WHITE_BGRD = 13
SCR_COLOR_RED_GRAY_BGRD = 17
SCR_COLOR_GREEN_GRAY_BGRD = 18
SCR_COLOR_YELLOW_GRAY_BGRD = 19
SCR_COLOR_BLUE_GRAY_BGRD = 20
SCR_COLOR_WHITE_GRAY_BGRD = 21
SCR_COLOR_MAGENTA_GRAY_BGRD = 22
# Background Color (Default black)
SCR_COLOR_BGRD = curses.COLOR_BLACK
# Special Color Codes
SCR_COLOR_NEUTRAL = SCR_COLOR_LIGHT_GRAY
# Special Symbols
SYMBOL_TICK = '\u2714'
SYMBOL_DISC = '\u25CF'
SYMBOL_DOWN_TRIANGLE = '\u25BC'
SYMBOL_UP_TRIANGLE = '\u25B2'
SYMBOL_DELETE = 'x'
SYMBOL_HEART = '\u2665'
SYMBOL_EXPERIENCE = '\u2605'
SYMBOL_GOLD = '\u25CF'
SYMBOL_MANA = '\u2600'
SYMBOL_EDIT = '\u270E'
SYMBOL_LEVEL = '\u2949'
SYMBOL_DUE = '\u29D6'
SYMBOL_CHALLENGE_FLAG = '\u2691'
# Status Attributes
HabitStatus = {'+': 0, '-': 0, SYMBOL_DELETE: False, SYMBOL_EDIT: False}
HabitPosStatus = {'+': 0, SYMBOL_DELETE: False, SYMBOL_EDIT: False}
HabitNegStatus = {'-': 0, SYMBOL_DELETE: False, SYMBOL_EDIT: False}
UnscoredHabitStatus = {SYMBOL_DELETE: False, SYMBOL_EDIT: False}
TODODailyStatus = {SYMBOL_TICK: False, SYMBOL_DELETE: False, SYMBOL_EDIT: False}
ChecklistStatus = {SYMBOL_TICK: False, SYMBOL_DELETE: False, SYMBOL_EDIT: False}
# Function for setting values at runtime
def ConfigureRuntime(screen):
global SCR_Y, SCR_X, SCR_MENU_ITEM_WIDTH, SCR_TEXT_AREA_LENGTH, SCR_FIRST_HALF_LENGTH
SCR_X, SCR_Y = screen.getmaxyx()
    SCR_MENU_ITEM_WIDTH = (SCR_Y - 10) // 3  # integer division: used as a curses column width
SCR_TEXT_AREA_LENGTH = (SCR_X - (SCR_MAX_MENU_ROWS + 7 + 4))
SCR_FIRST_HALF_LENGTH = SCR_MAX_MENU_ROWS + 7
# Parser Settings
SET_COMMANDS = ["d", "due", "every", "weekly", "direction"]
DIFFS = ["trivial", "easy", "medium", "hard"]
DATEPARSER = datetime.datetime.strptime
DATEFORMATS = ["%d/%m/%Y", "%d/%m/%y"]
DEFAULT_REPEAT = {'m': True, 't': True, 'w': True, 'th': True, 'f': True, 's': True, 'su': True}
# Configuration file settings
user_config = None
#Read in the configuration files
def ReadConfigFile():
global user_config
user_config = dict()
CONFIG_FILE = os.getenv("HOME")+'/.habiticarc'
try:
f = open(CONFIG_FILE, 'r')
except:
import sys
print("Enter UUID: ", end=' ')
uuid = input().strip()
print(" ")
print("Enter API-Key: ", end=' ')
key = input().strip()
f = open(CONFIG_FILE, 'w+')
f.write("uuid="+uuid+"\n")
f.write("key="+key+"\n")
f.write("debug_lvl=50\n")
f.close()
f = open(CONFIG_FILE, 'r')
for x in f.readlines():
x = x[:-1].split("=")
user_config[x[0]] = x[1]
f.close()
def getConfig(value):
if( user_config is None):
ReadConfigFile()
if( value in user_config ):
return user_config[value]
return None
``` |
{
"source": "1ncend1ary/wwa",
"score": 3
} |
#### File: wwa/web/scraper.py
```python
import requests
from web import logger, secret
_imagenet_url = 'http://www.image-net.org/api/text/imagenet.synset.geturls?wnid={}'
_bing_url = "https://api.cognitive.microsoft.com/bing/v7.0/images/search"
def bing_suppy_images(keyword, number_of_images):
"""
Get list of supplementary images from bing search by keyword
"""
headers = {'Ocp-Apim-Subscription-Key': secret.subscription_key}
params = {'q': keyword, 'license': 'public', 'imageType': 'photo'}
try:
response = requests.get(_bing_url, headers=headers, params=params)
response.raise_for_status()
search_results = response.json()
logger.info('Got a list of images from Bing.')
return [img['thumbnailUrl'] for img in search_results['value'][:number_of_images]]
except requests.exceptions.RequestException as e:
# any error caught while reading from the web, returning no images
logger.exception(e)
return []
def i_supply_images(category_id):
"""
Get list of images from image-net by category_id
"""
url = _imagenet_url.format(category_id)
try:
logger.info('Got a list of images from image-net.')
return requests.get(url).text.splitlines()
except requests.exceptions.RequestException as e:
# any error caught while reading from the web, returning no images
logger.exception(e)
return []
def supply_images(category_id, category_names):
"""
Get list of images from both Bing and image-net
:returns: list of images
"""
images = i_supply_images(category_id)
for cn in category_names:
images = bing_suppy_images(cn, 2) + images
return images
```
#### File: wwa/web/word2vec.py
```python
import gensim
import numpy
import math
from web import logger
from statistics import mean
class Vectoriser:
"""
Words and vectors handling utility class
"""
__path = 'web/model/GoogleNews-vectors-negative300.bin.gz'
__limit = 100000
__isBinary = True
logger.info('Starting training.')
__model = gensim.models.KeyedVectors.load_word2vec_format(__path, binary=__isBinary, limit=__limit)
logger.info('Finished training.')
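    # A phrase vector is the element-wise sum of its word vectors; if any token is
    # missing from the 100k-word slice of the model loaded above, the phrase is treated
    # as unknown and None is returned.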
def __to_vec(self, word):
"""
Convert a word to vector using a pre trained model
"""
vector = None
for w in word.split():
try:
cur_model = self.__model[w]
if vector is None:
vector = cur_model
else:
vector = numpy.add(vector, cur_model)
except KeyError:
# Word not found in model
return None
return vector
def sort(self, base, words):
"""
Sort words based on euclidean distance from base word
:deprecated:
"""
base_vec = self.__to_vec(base)
sorted_words = sorted(words, key=lambda x: self.__euclidean(base_vec, self.__to_vec(x)))
return sorted_words
def sort_with_f(self, bases, words, f):
"""
Sort words based on average euclidean distance from all base words applying a function to every element
:param bases: list of base words to calculate distance to
:param words: list of tuples - words to be sorted
:param f: function to be applied to every word before sort
:returns: list of sorted tuples
"""
base_vectors = [self.__to_vec(base) for base in bases if self.__to_vec(base) is not None]
if len(base_vectors) > 0:
sorted_words = sorted(words, key=lambda x: mean([self.__euclidean(basevec, self.__to_vec(f(x)))
for basevec in base_vectors]))
return sorted_words
else:
return words
@staticmethod
def __pow2(value):
return math.pow(value, 2)
def __euclidean(self, vector1, vector2):
"""
Get euclidean distance between two vectors
"""
if vector1 is None or vector2 is None:
return float('inf')
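        # Element-wise difference, squared, summed, then square-rooted: sqrt(sum((a - b) ** 2))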
return math.sqrt(sum(list((map(self.__pow2, map(lambda x, y: x - y, vector1, vector2))))))
def __new__(cls):
"""
Declare this class as singleton
This method is initiated before __init__ and check whether an instance of this class already exists
"""
if not hasattr(cls, 'instance'):
cls.instance = super(Vectoriser, cls).__new__(cls)
return cls.instance
``` |
{
"source": "1ncurred-da3mon/Cells",
"score": 3
} |
#### File: Cells/sense_cells/emulator.py
```python
import threading
import turtle
import random
import time
import init
import sys
import re
global sensor
global window
global funcs
global isinfunc
global incomment
funcs = dict()
currentfunc = None
isinfunc = False
incomment = False
class Cell:
def __init__(self):
self.color = None
self.t_obj = turtle.Turtle(shape='circle')
self.t_obj.shapesize(3)
self.t_obj.speed(0)
self.t_obj.penup()
self.t_obj.ht()
def initialize(self, color='#787878'):
self.color = color
self.t_obj.color(color)
def set_color(self, color):
self.color = color
self.t_obj.color(self.color)
class Vector2:
def __init__(self, x = -1, y = -1):
self.X = x
self.Y = y
def __repr__(self):
        return f"Vector2({self.X}, {self.Y})"  # __repr__ must return a string
def __str__(self):
return f"({self.X}, {self.Y})"
class Senser:
def __init__(self, countx, county):
self.cells = [[Cell() for x in range(countx)] for y in range(county)]
self.cell_count = Vector2(countx, county)
def initialize_all(self):
self.initializeAll()
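    # Lay the cells out as a grid starting near the top-left of the drawing area,
    # stepping 80 px per column and 90 px per row downwards, and warn when the grid
    # would run past the board edge.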
def initializeAll(self):
px = -390/2
py = 390/2 - 10
for j in range(self.cell_count.Y):
px = -390/2
for i in range(self.cell_count.X):
self.cells[j][i].initialize()
self.cells[j][i].t_obj.goto(px, py)
self.cells[j][i].t_obj.showturtle()
px += 80
if px >= 390:
print("Too many x Objects")
print(self.cell_count)
if py <= -390/2:
print("Too many y objects!")
print(self.cell_count)
print("current y:", j+1)
sys.exit(1)
py -= 90
sensor = Senser(6, 5)
def matchesColor(obj: str) -> bool:
for color in ("yellow, gold, orange, red, maroon, violet, magenta, purple, navy, blue, skyblue, cyan, turquoise, lightgreen, green, darkgreen, chocolate, brown, black, gray, white".split(', ')):
        if obj == color or re.match(r"#([0-9a-fA-F]{6})", obj):  # accept lower-case hex, since input is lower-cased upstream
return True
return False
def gen_random_color():
rstr = '#'
for _ in range(6):
rstr += random.choice(list('0123456789ABCDEF'))
return rstr
def InterpretInput(str_input: str) -> None:
global currentfunc
global incomment
global isinfunc
global window
global sensor
global funcs
# check if insdie comments
if incomment:
if '*/' in str_input:
str_input = str_input[str_input.find('*/')+2:]
incomment = False
else:
return
str_input = str_input.strip().lower()
# current adding words to a function
if isinfunc:
if str_input == "end":
isinfunc = False
currentfunc = None
else:
funcs[currentfunc].append(str_input)
# call help command
elif str_input == "help":
print("set [cell.../color] <color>")
# single line comment
elif str_input.startswith('//'):
return
#multiline comment
elif str_input.startswith('/*'):
incomment = True
# set a cell to a certain color
elif str_input.startswith("set "):
splitinput = str_input.split(' ')
if len(splitinput) < 2:
print("invalid input!")
elif matchesColor(splitinput[-1]) or splitinput[-1] == 'rand_color':
if len(splitinput) == 2:
for y in range(sensor.cell_count.Y):
for x in range(sensor.cell_count.X):
if splitinput[-1] == 'rand_color':
sensor.cells[y][x].set_color(gen_random_color())
else:
sensor.cells[y][x].set_color(splitinput[1])
else:
newinp = splitinput[1:len(splitinput)-1]
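                # Cell addresses are a row letter A-E plus an optional column digit,
                # e.g. "a3" is row A, column 3; a bare letter recolours the whole row.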
#currentcell = []
#try:
for cell in newinp:
line = list(cell)[0].upper()
#print(enumerate(list("ABCDE")))
for i in enumerate(list("ABCDE")):
if line == i[1]:
line = i[0]
break
else:
print("Out of range!")
sys.exit(1)
if len(list(cell)) == 1:
for i in range(6):
print(i)
if splitinput[-1] == 'rand_color':
sensor.cells[line][i].set_color(gen_random_color())
else:
sensor.cells[line][i].set_color(splitinput[-1])
else:
col = int(list(cell)[1])
if splitinput[-1] == 'rand_color':
randomcolor = gen_random_color()
sensor.cells[line][col].set_color(randomcolor)
else:
sensor.cells[line][col].set_color(splitinput[-1])
# check for the sleep function
elif str_input.startswith("sleep") or str_input.startswith("slp"):
if len(str_input.strip().split()) < 2:
print("Invalid use for sleep command!")
else:
time.sleep(float(str_input.split()[1]))
# creates functions
elif str_input.startswith("func "):
if len(str_input.split()) != 2:
print("failed to create function. Template: \"func myfunc:\"")
elif isinfunc:
print("already in function. cannot create nested functions.")
else:
isinfunc = True
currentfunc = str_input.split()[1].replace(':', '')
funcs[currentfunc]=list()
# user wants to exit
elif str_input == "exit":
sys.exit(0)
# user called a function
elif str_input in funcs.keys():
for com in funcs[str_input]:
InterpretInput(com)
# user entered nothing
elif len(str_input.strip()) == 0:
return
# invalid input
else:
print(f"invalid command: {str_input}")
window.update()
def CreateGUI():
global sensor
global window
window = turtle.Screen()
window.screensize(800, 710)
#window.update()
filler = turtle.Turtle()
filler.speed(0)
filler.hideturtle()
filler.penup()
filler.pensize(4)
filler.fd(600)
init.draw_n_fill(Vector2(600, 600), '#474747', filler)
filler.penup()
filler.goto(500, 0)
init.draw_n_fill(Vector2(500, 500), 'green', filler)
window.update()
sensor.initializeAll()
def InteractiveSession():
global sensor
global window
global funcs
global isinfunc
global currentfunc
CreateGUI()
TakeInput()
def PostFileInteractiveSession():
global sensor
global funcs
global isinfunc
global currentfunc
TakeInput()
def TakeInput():
while True:
str_input = input("~>: ")
InterpretInput(str_input)
# sensor = Senser(5, 5)
# sensor.initialize_all()
if __name__ == '__main__':
if len(sys.argv) <= 1:
InteractiveSession()
else:
CreateGUI()
with open(sys.argv[1], 'r') as f:
for line in f.readlines():
InterpretInput(line.replace('\n', ''))
PostFileInteractiveSession()
``` |
{
"source": "1nd0m1nu3v01d3/Master",
"score": 2
} |
#### File: src/replit/__init__.py
```python
from . import web
from .audio import Audio
from .database import (
db,
Database,
AsyncDatabase,
make_database_proxy_blueprint,
start_database_proxy,
)
from .info import ReplInfo
info = ReplInfo()
# Backwards compatibility.
def clear() -> None:
"""Clear the terminal."""
print("\033[H\033[2J", end="", flush=True)
audio = Audio()
``` |