ext stringclasses 9 values | sha stringlengths 40 40 | content stringlengths 3 1.04M |
---|---|---|
py
|
1a55805b279e21d0d0bdc998eff35c73c26598af
|
import os
import glob
import logging
import numpy as np
import mirnylib.genome
import hiclib.mapping
import mirnylib.h5dict
logging.basicConfig(level=logging.DEBUG)
if not os.path.exists('tmp'):
os.mkdir('tmp')
bowtie_bin_path = '../../bin/bowtie2/bowtie2'
bowtie_index_path = '../../bin/bowtie2/index/sacCer3'
fasta_path = '../../fasta/sacCer3'
genome_db = mirnylib.genome.Genome(fasta_path)
print "Map the insilico generated Hi-C reads..."
hiclib.mapping.iterative_mapping(
bowtie_path=bowtie_bin_path,
bowtie_index_path=bowtie_index_path,
fastq_path='./tmp/insilico_1.fastq',
out_sam_path='./tmp/insilico_1.bam',
min_seq_len=25,
len_step=5,
nthreads=4,
tmp_dir='./tmp',
bowtie_flags='--very-sensitive')
hiclib.mapping.iterative_mapping(
bowtie_path=bowtie_bin_path,
bowtie_index_path=bowtie_index_path,
fastq_path='./tmp/insilico_2.fastq',
out_sam_path='./tmp/insilico_2.bam',
min_seq_len=25,
len_step=5,
nthreads=4,
tmp_dir='./tmp',
bowtie_flags='--very-sensitive')
print "Done!"
print 'Parse the generated BAMs...'
lib = mirnylib.h5dict.h5dict('./tmp/insilico_mapped_reads.hdf5')
hiclib.mapping.parse_sam(
sam_basename1='./tmp/insilico_1.bam',
sam_basename2='./tmp/insilico_2.bam',
out_dict=lib,
genome_db=genome_db,
keep_ids=True,
enzyme_name='HindIII')
print 'Done!'
|
py
|
1a55806f6738c51fab42373d8536a538e4d5f6b7
|
from Constant_Speed_Constant_Angle import Constant_Speed_Constant_Angle
from Constant_Speed_Constant_Angle_Noise import Constant_Speed_Constant_Angle_Noise
from Constant_Speed_Constant_Rate import Constant_Speed_Constant_Rate
from Linear_Mach_Constant_Rate import Linear_Mach_Constant_Rate
from Constant_Throttle_Constant_EAS import Constant_Throttle_Constant_EAS
from Constant_Throttle_Constant_Mach import Constant_Throttle_Constant_Mach
from Constant_Throttle_Constant_Speed import Constant_Throttle_Constant_Speed
from Constant_Throttle_Linear_Speed import Constant_Throttle_Linear_Speed
from Constant_EAS_Constant_Rate import Constant_EAS_Constant_Rate
from Unknown_Throttle import Unknown_Throttle
|
py
|
1a55812baa9f52bab0a36ce6affb37aa4486cc57
|
from jsonrpc import ServiceProxy
access = ServiceProxy("http://127.0.0.1:30332")
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
|
py
|
1a558191ea3eec29752c01a2a7354c4b6b262834
|
import ast
import os
import shutil
from distutils.dir_util import copy_tree
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import load_model
import tensorflow_hub as hub
from tqdm import tqdm
class PseudoLabelGenerator:
"""
Class to generate pseudo labels for unlabeled images using a trained model.
Arguments:
model_path (str): location of the h5 tensorflow model to use
train_data_path (str): folder which holds training data
unlabeled_path (str): folder which holds unlabeled data
pseudo_data_path (str): folder to store training data and pseudo data combined
output_folder (str): folder to store outputs
csv_filename (str): name of csv file
"""
def __init__(self, model_path="model.h5", train_data_path="data/image_dataset/train",
unlabeled_path="data/unlabeled", pseudo_data_path="data/train_ssl",
output_folder="outputs", csv_filename="data.csv"):
self.train_data_path = train_data_path
self.unlabeled_path = unlabeled_path
self.pseudo_data_path = pseudo_data_path
self.output_folder = output_folder
self.csv_path = os.path.join(self.output_folder, csv_filename)
# Load model
self.model = load_model(model_path, compile=False, custom_objects={"KerasLayer": hub.KerasLayer})
print("Loaded model.")
# Make new output folder
if not os.path.exists(self.output_folder):
os.mkdir(self.output_folder)
# Make dictionary for classes and their index
self.class_names = sorted(os.listdir(self.train_data_path))
self.class_dict = {cat: i for (i, cat) in enumerate(self.class_names)}
def _load_img(self, path, target_size=(299, 299)):
"""
Load an image from a given path and normalize it
Arguments:
path (list): Input image path
target_size (tuple): Size of image
Returns:
np.array: Numpy array of the data
"""
# Read image
bits = tf.io.read_file(path)
image = tf.image.decode_jpeg(bits, channels=3)
# Resize
image = tf.image.resize(image, size=[*target_size])
image = tf.reshape(image, [*target_size, 3])
image = tf.cast(image, tf.uint8)
# Normalize [0, 1]
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = image.numpy()
return image
def _plot_data(self, predictions, name, output_path):
"""
Plots a bar plot and saves it to a file.
Arguments:
predictions (list): List of predictions
name (str): Title of the plot
output_path (str): Save file using this name
"""
predictions = sorted(predictions)
samples = list(range(len(predictions)))
plt.bar(samples, predictions, color='g')
plt.axhline(y=0.5, color='r', linestyle='--')
plt.title(name, size=16)
plt.xlabel("Number of unlabelled images", size=16)
plt.ylim([0.0, 1.0])
plt.ylabel("Probability", size=16)
plt.tick_params(labelright=True)
plt.savefig(output_path, dpi=100)
plt.clf() # clear buffer, otherwise plots overlap!
def plot_confidence_scores(self, per_class=True, overall=True):
"""
Generate bar plots for highest confidence predictions per class and overall and save them.
Arguments:
per_class (bool): make bar plots per class or not
overall (bool): make overall bar plot or not
"""
dt = pd.read_csv(self.csv_path)
dt['All Class Predictions List'] = dt['All Class Predictions'].apply(
lambda x: ast.literal_eval(x))
raw_predictions_ = dt[["Highest Confidence"]].values
raw_predictions = [pred[0] for pred in raw_predictions_]
raw_predictions_all_ = dt[['All Class Predictions List']].values
raw_predictions_all = [pred[0] for pred in raw_predictions_all_]
# Plot graph for highest confidence pseudo labels for each class
if per_class:
for idx, cat in enumerate(self.class_names):
predictions = [pred[idx] for pred in raw_predictions_all]
title = "Confidences for the class: {}".format(cat)
path = "{}/{}_confidences.png".format(
self.output_folder, cat)
self._plot_data(predictions, title, path)
# Plot graph for highest confidence pseudo labels for all unlabeled images
if overall:
self._plot_data(raw_predictions,
name="Highest confidence pseudo labels",
output_path="{}/highest_confidence_predictions.png".format(
self.output_folder))
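# Builds a batched, prefetched tf.data pipeline that decodes the unlabeled images,
# resizes them to 299x299 and converts them to float32 in [0, 1] for prediction.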
def _make_dataset(self, filenames, batch_size):
def parse_image(filename):
image = tf.io.read_file(filename)
image = tf.image.decode_jpeg(image, channels=3)
image = tf.image.resize(image, [299, 299])
image = tf.cast(image, tf.uint8)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
return image
def configure_for_performance(ds):
ds = ds.batch(batch_size)
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return ds
filenames_ds = tf.data.Dataset.from_tensor_slices(filenames)
images_ds = filenames_ds.map(parse_image,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = configure_for_performance(images_ds)
return ds
def move_unlabeled_images(self, threshold=None):
"""
Split unlabeled images into folders of the training data based on pseudo labels. A new training dataset is
created with the labeled and pseudo labeled data.
Arguments:
threshold (float): Discard images with prediction below this confidence, default is None.
"""
# Copy the training/labeled data to the destination folder where
# we will also store the pseudo labels.
copy_tree(self.train_data_path, self.pseudo_data_path)
dt = pd.read_csv(self.csv_path)
filepaths = dt[["Filepaths"]].values
predicted_class = dt[["Predicted Class"]].values
raw_predictions = dt[["Highest Confidence"]].values
for proba, y, path in zip(raw_predictions, predicted_class, filepaths):
# Copy the image into the folder of the class whose index matches the predicted class
for class_name, index in self.class_dict.items():
if threshold:
# For thresholding predictions
if index == y and proba >= threshold:
shutil.copy(os.path.join(self.unlabeled_path, str(path[0])),
os.path.join(self.pseudo_data_path, class_name))
else:
# For hard predictions
if index == y:
shutil.copy(os.path.join(self.unlabeled_path, str(path[0])),
os.path.join(self.pseudo_data_path, class_name))
print("Moved unlabeled images to their pseudo label categories.")
def generate_pseudolabel_data(self, plot_confidences=False, threshold=None, move_images=False, batch_size=32):
""" Use trained model to make pseudo labels and save them into a csv file. Also possible to plot the results
and move the unlabeled images directly to the category corresponding to their pseudo label.
Arguments:
plot_confidences (boolean): Whether to plot confidence graphs for raw confidences and per class confidences.
threshold (float): Discard images with prediction below this confidence, default is None. Only used
if move_images is True.
move_images (bool): Move images into categories or not
batch_size (int): Batch size while making predictions
Returns:
None. Writes a CSV of pseudo labels to the output folder and, if move_images is True,
fills pseudo_data_path with both labeled and pseudo labeled images.
"""
print("Generating pseudo labels...")
# Generate pseudo labels
unlabeled_image_paths = os.listdir(self.unlabeled_path)
print("There are {} unlabeled images.".format(
len(unlabeled_image_paths)))
raw_predictions = [] # single confidence value of predicted class
predicted_class = [] # predicted class index
raw_predictions_all = [] # confidences for all classes
unlabeled_filenames = [os.path.join(self.unlabeled_path,
path) for path in unlabeled_image_paths]
ds = self._make_dataset(unlabeled_filenames, batch_size)
y_preds = []
for batch in tqdm(ds):
y_preds_ = self.model.predict(batch)
y_preds.extend(list(y_preds_))
for y_pred in y_preds:
y = np.argmax(y_pred)
# Get probability score
proba = y_pred[y]
predicted_class.append(y)
raw_predictions.append(proba)
raw_predictions_all.append(list(y_pred))
raw_predictions_paths = [path for path in unlabeled_image_paths]
# 'Pseudo Class Names': pseudo_class_names,
print("Saving CSV with pseudo predictions.")
data = {'Filepaths': raw_predictions_paths,
'Predicted Class': predicted_class,
'Highest Confidence': raw_predictions,
'All Class Predictions': raw_predictions_all}
df = pd.DataFrame(data)
df.to_csv(self.csv_path, index=False)
# move pseudo labeled images
if move_images:
self.move_unlabeled_images(threshold=threshold)
if plot_confidences:
print("Plotting data.")
self.plot_confidence_scores()
|
py
|
1a55837547f5d14af0ac37de5769befb381550dd
|
import subprocess
import tempfile
import inspect
import random
import shutil
import time
import json
import sys
import os
KWARGS = "k"
RESULT = "r"
VALUE = "v"
ARGS = "a"
# !!! In the decorator, you could actually delete a function's files once it has been read
# Or, if enough time has passed, just delete the file; it could also be removed from the loop
"""
Dosyalama Şekli;
+/tmp
|---> -/dirio
|---> +/12345
|---> -/67891
|-----> __main__.py -> Sadece ilk çalıştırılırken vardır.
|-----> +/func1
|-----> -/func2
| |-------> 11
| |-------> 12
| |-------> 13
| |-> {"a": ["Func args"], "k": {"Func kwargs"}, "r": "Dönüş değeri"}
|-----> variable1
|-----> variable2
|-----> variable3
|-------> "Değişkendeki değer"
"""
# import fcntl
#
#
# # ################################ File LOCK - Not used yet
# # https://stackoverflow.com/questions/4843359/python-lock-a-file
# def lock_to_file(filename):
# """ acquire exclusive lock file access """
# locked_file_descriptor = open(filename, 'w+')
# fcntl.lockf(locked_file_descriptor, fcntl.LOCK_EX)
# return locked_file_descriptor
#
#
# def lock_to_release(locked_file_descriptor):
# """ release exclusive lock file access """
# locked_file_descriptor.close()
#
#
# # ##############################################################
def new_dir(tempdir, module, class_name, args, kwargs):
"""/Tempdir/353464325"""
# Create the directory
dir_path = os.path.join(tempdir or tempfile.gettempdir(), "dirio")
if not os.path.exists(dir_path):
os.mkdir(dir_path)
no = 111
while str(no) in os.listdir(dir_path):
no = random.randint(111, 9999999999)
new_path = os.path.join(dir_path, str(no))
os.mkdir(new_path)
# #######################
# #######################
# Build the script
# This file itself is read first.
with open(__file__) as f:
script_body = f.read()
# Determine the path of the module to be imported.
module_name = os.path.basename(module).split(".")[0]
module_path = os.path.dirname(module)
if module_name in ("__init__", "__main__"):
module_name = os.path.basename(module_path)
module_path = os.path.dirname(module_path)
# Assemble the pieces of the script
# script_head = f"import sys\nsys.path.append('{module_path}')\nfrom {module_name} import {class_name}"
script_head = f"""
import sys
sys.path.append('{module_path}')
from {module_name} import {class_name}
"""
# script_footer = f"new = Dirio(target={class_name}, args={args}, kwargs={kwargs}, worker=True)\nnew._dr_loop()"
script_footer = f"""
try:
new = Dirio(target={class_name}, args={args}, kwargs={kwargs}, worker=True)
new._dr_loop()
except:
pass
# Delete the files on exit
dirname = os.path.dirname(__file__)
if os.path.exists(dirname):
shutil.rmtree(dirname)
sys.exit()
"""
script = "\n".join([script_head, script_body, script_footer])
# Write the script to disk
with open(os.path.join(new_path, "__main__.py"), "w") as f:
f.write(script)
# It would be better to launch this as a separate process here as well
subprocess.Popen([
sys.executable,
new_path
],
# close_fds=True
)
print("Dirio -> New Path ->", new_path)
return new_path
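# Returns True only if the value (checked recursively for dicts, lists and tuples) is made up of
# JSON-serializable primitive types; anything else cannot be exchanged through the files.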
def check_type(value):
tip = type(value)
check = True
if tip in (dict, list, tuple, int, str, float, bool, type(None)):
if tip is dict:
for k, v in value.items():
if not (check_type(k) and check_type(v)):
return False
elif tip in (list, tuple):
for i in value:
if not check_type(i):
return False
else:
return False
return check
def set_decorator(self):
# Go over all attributes defined on the class
for attr in self.__dict__:
# If the name is one of these, skip it and do not add the decorator.
# if attr in ("__getattribute__", "__setattr__", "__new__"):
if (attr.startswith("__") and attr.endswith("__")) or attr in ("dr_terminate", "dr_code",
"dr_bind", "dr_binds_check", "dr_isactive"):
continue
# If it is callable, wrap it with the decorator
attribute = getattr(self, attr)
if callable(attribute):
setattr(self, attr, get_decorator(self, attribute))
def get_result(path_code, dr_wait):
start_time = time.time()
wait_time = 1
# Wait while reading the answer
while wait_time:
if os.path.exists(path_code):
try:
with open(path_code) as f:
data = json.load(f)
if RESULT in data:
return data.get(RESULT)
except:
pass
# -1 means wait until the answer arrives
# 0 means check only once
# a value such as 5 means wait for up to 5 seconds
if dr_wait >= 0:
wait_time = time.time() - start_time < dr_wait
return None
def get_decorator(self, func):
def wrapper(*args, **kwargs):
# Inside kwargs:
# if dr_code=True: return the read code.
# if dr_code is an int such as 2345:
#   on the client, return the result for that code if it exists, otherwise None;
#   on the server, it means "write the RESULT value into the file with that code".
# if neither is given: return the most recent answer, whatever it is
dr_code = kwargs.pop("dr_code", False)
dr_wait = kwargs.pop("dr_wait", 0)
# The function's directory
path = os.path.join(self._dr_dir, func.__name__)
# Create it if it does not exist
if not os.path.exists(path):
os.mkdir(path)
# Core methods are executed immediately. !!! If you want to use this, also adjust set_decorator
# if func.__name__.startswith("__") and func.__name__.endswith("__"):
# return func(*args, **kwargs)
# If this is the client, or the method is not a self method (i.e. @classmethod or @staticmethod), drop the leading self argument
if not self._dr_active or "self" not in inspect.getfullargspec(func).args:
args = args[1:]
# ################################
# If this is the client and the parameters are serializable, save them to a file.
if not self._dr_active and check_type(args) and check_type(kwargs):
# dr_code -> int -> If data with this code exists, return it, waiting for the answer up to the given time
if type(dr_code) is int and dr_code > 1:
return get_result(os.path.join(path, str(dr_code)), dr_wait)
# Reads the file names in the function's directory as ints and
# takes the largest number in the ["1", "2", ...] string list; if there is none, it is 10
son_code = self._dr_last_code
new_code = son_code + 1
full_path = os.path.join(path, str(new_code))
while os.path.exists(full_path):
new_code += 1
full_path = os.path.join(path, str(new_code))
# Write the data to the file
with open(full_path, 'w') as f:
json.dump({ARGS: args, KWARGS: kwargs}, f)
self._dr_last_code = new_code
# Wait this long for the answer, then return
if dr_wait:
return get_result(full_path, dr_wait)
# dr_code -> True -> Return the code
if dr_code is True:
return new_code
# dr_code -> False -> Default: return the answer from the latest file if there is one
if son_code != 10:
try:
with open(os.path.join(path, str(son_code))) as f:
return json.load(f).get(RESULT)
except:
pass
# If none of the above applies, return nothing
return None
# ################################
# If a code is given, write the data to the file with that code, provided the types are serializable.
if type(dr_code) is str:
file = os.path.join(path, dr_code)
try:
with open(file) as f:
data = json.load(f)
except:
return
# Done this way because a class function vs. a self function can otherwise raise an extra-argument error
if "self" not in inspect.getfullargspec(func).args:
result = func(*data.get(ARGS, ()), **data.get(KWARGS, {}))
else:
result = func(args[0], *data.get(ARGS, ()), **data.get(KWARGS, {}))
data[RESULT] = result if check_type(result) else None
with open(file, "w") as f:
json.dump(data, f)
# If the function's name is not in the time tracker yet, create an entry
if func.__name__ not in self._dr_last_times:
self._dr_last_times[func.__name__] = {}
# Since we modified the function's file ourselves, record its modification time so it is not later mistaken for an external change
self._dr_last_times[func.__name__][dr_code] = os.stat(file).st_mtime
else:
# If this is the server, just run the function directly
result = func(*args, **kwargs)
return result
return wrapper
class Dirio:
_dr_inwork = False
_dr_binds = {}
def __init__(self, target=None, args=(), kwargs={}, tempdir="", keeperiod=10, looperiod=.05, worker=False):
"""
:param target: class: Hedef Class
:param args: tuple: Class'ın argümanları
:param kwargs: dict: Class'ın keyword'lü argümanları
:param tempdir: str: Temporary klasörü. Girilmediyse, standart sistemdeki klasör kullanılır.
:param keeperiod: int: Geçmişi tutma süresi. Default: 10 sn boyunca geçmişi saklar.
:param looperiod: int: Sunucu için, döngüde bekleme süresi. Küçük olursa işlemciden, büyük olursa işlemden zarar
:param worker: bool: Read Only. Değiştirme. Sınıfın kendine has kullanımına dahildir.
"""
self._dr_bind = {}
self._dr_active = worker
self._dr_last_code = 10
self._dr_last_times = {}
self._dr_keep_period = keeperiod
self._dr_loop_period = looperiod
# Copy the class first, so that our modifications to it are not permanent
target = type(target.__name__, target.__bases__, dict(target.__dict__))
set_decorator(target)
if worker:
# Server side. This branch only runs when started from the temp directory
self._dr_dir = os.path.dirname(__file__)
else:
# Client side. The server is created and started
self._dr_dir = new_dir(tempdir, inspect.getfile(target), target.__name__, args, kwargs)
# target = type(f'gecis.{target.__name__}', tuple(target.__bases__), dict(target.__dict__))
# Attach the Dirio attributes to the other class
for attr in self.__dict__:
if attr.startswith("_dr_") or attr.startswith("dr_"): # or attr in ("__getattribute__", "__setattr__"):
setattr(target, attr, self.__getattribute__(attr))
# Turn ourselves into a copy of the class
self.__class__ = type(f'dirio.{target.__name__}', tuple([Dirio, target]), dict(self.__dict__))
self._dr_inwork = True
super().__init__(*args, **kwargs)
def __getattribute__(self, name):
# If the name starts with _dr_ or is a dunder such as __xxx__, return it right away
if name.startswith("_dr_") or (name.startswith("__") and name.endswith("__")):
return super().__getattribute__(name)
in_class = name in dir(self)
# print("__getattribute__\t<--\t\t\t", name)
# If it is a function, return it directly.
###############
if in_class:
value = super().__getattribute__(name)
if callable(value):
return value
# If it is a variable:
###############
# If the value exists in a file, read it from there
if name in os.listdir(self._dr_dir):
with open(os.path.join(self._dr_dir, name)) as f:
value = json.load(f).get(VALUE)
return value
if in_class:
value = super().__getattribute__(name)
# Apparently it is not in a file since we got this far, so save it to a file as well.
self.__setattr__(name, value)
return value
return lambda *args, **kwargs: None
def __setattr__(self, key, value):
# print("__setattribute__\t\t\t-->\t", key, value)
# If value is callable, give it our attributes.
# If the value is serializable, write it to a file named after the key
# This applies to both the client and the server
if self._dr_inwork:
file = os.path.join(self._dr_dir, key)
if check_type(value):
with open(file, "w") as f:
json.dump({VALUE: value}, f)
else:
# If the type cannot be saved, also delete any existing file so that reading it back does not cause problems
if os.path.exists(file):
os.remove(file)
# !!! This step is not actually needed for variables; doing it only for functions would be enough
# If this is the server, update the file's modification time so it is not mistaken for a change.
# if self._dr_active:
# self._dr_last_times[key] = os.stat(file).st_mtime
super().__setattr__(key, value)
def _dr_loop(self):
# Delete the script file
if os.path.exists(__file__):
os.remove(__file__)
# Read any previously saved values first
# Then save all values back to files
for i in dir(self):
if not (i.startswith("__") and i.endswith("__")):
getattr(self, i)
# Done this way so that each access does not put extra load on the class's getattr
_dr_dir = self._dr_dir
_dr_last_times = self._dr_last_times
while os.path.exists(_dr_dir):
# Collect the names of the function directories inside the folder.
func_dirs = [i for i in os.listdir(_dr_dir) if os.path.isdir(os.path.join(_dr_dir, i))]
# Walk through all function directories
for func_dir in func_dirs:
func_full_path = os.path.join(_dr_dir, func_dir)
# If the function is not in the last-times map yet, add it
if func_dir not in _dr_last_times:
_dr_last_times[func_dir] = {}
lasts = _dr_last_times[func_dir]
for func_code in os.listdir(func_full_path):
if not func_code.isdigit():
continue
func_code_full_path = os.path.join(func_full_path, func_code)
st = os.stat(func_code_full_path).st_mtime
# Skip if we already ran it and its modification time has not changed
if func_code in lasts and st == lasts.get(func_code):
# If the retention period has expired, move it to a history folder and also remove it from last_times
# if time.time() - st > self._dr_keep_period:
# pass
# # remove it from lasts.
continue
# Run it if it is being called for the first time or its modification time differs
# The function is executed and its return value is saved, together with its timestamp.
# print("Fonksiyonu çağıtıruz", func_dir, func_code)
getattr(self, func_dir)(dr_code=func_code)
# self.deger += 5
# print(self.deger, self)
# print("Bu da dönüyor", getattr(self, "deger"))
time.sleep(self._dr_loop_period)
def dr_terminate(self):
"""İşlemi bitirir"""
if os.path.exists(self._dr_dir):
shutil.rmtree(self._dr_dir)
def dr_code(self, code, wait=0):
"""Dönüşü koddan direkt olarak okumayı sağlar."""
if type(code) is int:
code = str(code)
# Walk through all function directories; if one contains a file for the given code, read it
for func_name in [j for j in os.listdir(self._dr_dir) if os.path.isdir(os.path.join(self._dr_dir, j))]:
func_path = os.path.join(self._dr_dir, func_name)
if code in os.listdir(func_path):
return get_result(os.path.join(func_path, code), wait)
return None
def dr_bind(self, code, func, args=(), kwargs={}):
"""Girilen kod ile sonuç alındığında, 'func'u çağırır. Parametrelerini de girer.
Sonuçları arayabilmesi için, arada 'dr_binds_check'in çalıştırılması gerekir.
Fonksiyonun alacağı ilk parametre, code'un dönüş değeri olmalı"""
self._dr_binds[code] = [func, args, kwargs]
def dr_binds_check(self):
"""Sonuçları kontrol eder. Sonuçlar geldiyse, Bind'leri çalıştırır"""
event = False
for code, vals in self._dr_binds.copy().items():
result = self.dr_code(code)
if result is not None:
func = vals[0]
args = vals[1]
kwargs = vals[2]
func(*args, **kwargs, result=result)
self._dr_binds.pop(code)
event = True
return event
def dr_isactive(self):
return self._dr_inwork and os.path.exists(self._dr_dir)
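# Illustrative usage (a sketch based on the behaviour above; MyClass and its method are placeholders):
#   proxy = Dirio(target=MyClass, args=(), kwargs={})   # spawns the worker process
#   code = proxy.some_method(1, 2, dr_code=True)        # queue the call, get its read code
#   result = proxy.dr_code(code, wait=5)                # poll the result file for up to 5 seconds
#   proxy.dr_terminate()                                # remove the exchange directory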
|
py
|
1a55844a26782bffb39704e4b2876c190d60e2ae
|
import urllib.request
import urllib.error
import urllib.parse
import json
from arbitrage.public_markets.market import Market
class GDAX(Market):
def __init__(self, currency, code):
super().__init__(currency)
self.code = code
self.update_rate = 30
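# Fetches the level-2 order book for this product from the GDAX REST API and stores it
# as {'asks': [...], 'bids': [...]}, each a price-sorted list of {'price', 'amount'} dicts.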
def update_depth(self):
url = 'https://api.gdax.com/products/%s/book?level=2' % self.code
req = urllib.request.Request(url, headers={
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"User-Agent": "curl/7.24.0 (x86_64-apple-darwin12.0)"})
res = urllib.request.urlopen(req)
depth = json.loads(res.read().decode('utf8'))
self.depth = self.format_depth(depth)
def sort_and_format(self, l, reverse=False):
l.sort(key=lambda x: float(x[0]), reverse=reverse)
r = []
for i in l:
r.append({'price': float(i[0]), 'amount': float(i[1])})
return r
def format_depth(self, depth):
bids = self.sort_and_format(depth['bids'], True)
asks = self.sort_and_format(depth['asks'], False)
return {'asks': asks, 'bids': bids}
|
py
|
1a5584f05fa2ba17f078dbe54d616e71aca0f0e5
|
#!/usr/bin/python
# List comprehensions
my_list = [1,2,3,4,5,6,7,8,9]
print my_list
squares = [num * num for num in my_list]
print type(squares)
print squares
|
py
|
1a5585c1db9ebb00e8d766e604e600c7729a0dab
|
"""Tensorflow trainer class."""
import logging
import math
import os
from typing import Callable, Dict, Optional
import numpy as np
import tensorflow as tf
from .modeling_tf_utils import TFPreTrainedModel, shape_list
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput
from .training_args_tf import TFTrainingArguments
logger = logging.getLogger(__name__)
class TFTrainer:
model: TFPreTrainedModel
args: TFTrainingArguments
# something similar to a PT Dataset.
# This is just temporary until we have
# a framework-agnostic approach for datasets.
train_dataset: Optional[tf.data.Dataset]
eval_dataset: Optional[tf.data.Dataset]
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None
prediction_loss_only: bool
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
prediction_loss_only=False,
):
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
self.gradient_accumulator = GradientAccumulator()
self._setup_training()
def _setup_training(self) -> None:
"""
Set up the different steps to train a model:
- check if all the data are given
- create the proper strategy
- create the features
- prepare the model settings
"""
self._prepare_dataset()
with self.args.strategy.scope():
self._create_optimizer()
_ = self.optimizer.iterations
self._set_loss_and_metric()
self._create_checkpoint_manager()
self._create_summary_writer()
def _set_loss_and_metric(self) -> None:
"""
Create the training loss and metric with their name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
try:
self.loss = tf.keras.losses.get(
{
"class_name": self.args.loss_name,
"config": {"from_logits": True, "reduction": tf.keras.losses.Reduction.NONE},
}
)
except TypeError:
self.loss = tf.keras.losses.get(
{"class_name": self.args.loss_name, "config": {"reduction": tf.keras.losses.Reduction.NONE}}
)
def _create_summary_writer(self) -> None:
"""
Create a summary writer to be able to read the logs in Tensorboard.
"""
self.writer = tf.summary.create_file_writer(self.args.logging_dir)
def _prepare_dataset(self) -> None:
"""
Prepare the training, validation and test data.
"""
if self.train_dataset is not None:
self.num_train_examples = self.train_dataset.reduce(tf.constant(0), lambda x, _: x + 1).numpy()
if self.args.max_steps > 0:
self.train_steps = self.args.max_steps
else:
self.train_steps: int = math.ceil(self.num_train_examples / self.args.train_batch_size)
self.train_dataset = (
self.train_dataset.cache()
.shuffle(self.num_train_examples)
.batch(self.args.train_batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
)
if self.args.max_steps > 0:
self.train_dataset = self.train_dataset.repeat(-1)
self.train_dataset = self.args.strategy.experimental_distribute_dataset(self.train_dataset)
else:
self.train_steps = 0
if self.eval_dataset is not None:
self.eval_dataset = (
self.eval_dataset.batch(self.args.eval_batch_size).cache().prefetch(tf.data.experimental.AUTOTUNE)
)
self.eval_dataset = self.args.strategy.experimental_distribute_dataset(self.eval_dataset)
def _create_optimizer(self) -> None:
"""
Create the training optimizer with its name. Allowed names are those listed
in the Tensorflow documentation and those contained in the transformers library.
"""
if self.args.optimizer_name == "adamw":
self.optimizer = create_optimizer(
self.args.learning_rate, self.train_steps, self.args.warmup_steps, self.args.end_lr
)
else:
try:
self.optimizer = tf.keras.optimizers.get(
{
"class_name": self.args.optimizer_name,
"config": {"learning_rate": self.args.learning_rate, "epsilon": self.args.adam_epsilon},
}
)
except TypeError:
# This is for the case where the optimizer is not Adam-like such as SGD
self.optimizer = tf.keras.optimizers.get(
{"class_name": self.args.optimizer_name, "config": {"learning_rate": self.args.learning_rate}}
)
logger.info("Created an/a {} optimizer".format(self.optimizer))
def _create_checkpoint_manager(self, max_to_keep: int = 5, load_model: bool = True) -> None:
"""
Create a checkpoint manager in order to be able to make the training
fault-tolerant.
Args:
max_to_keep: the maximum number of checkpoints to keep in the checkpoint path.
load_model: if we want to start the training from the latest checkpoint.
"""
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, PREFIX_CHECKPOINT_DIR, max_to_keep=max_to_keep)
if load_model:
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
@tf.function
def _evaluate_steps(self, per_replica_features, per_replica_labels):
"""
One step evaluation across replica.
Args:
per_replica_features: the batched features.
per_replica_labels: the batched labels.
Returns:
The loss corresponding to the given batch.
"""
per_replica_loss, per_replica_logits = self.args.strategy.experimental_run_v2(
self._run_model, args=(per_replica_features, per_replica_labels, False)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss, per_replica_logits
def _prediction_loop(
self, dataset: tf.data.Dataset, description: str, prediction_loss_only: Optional[bool] = None
) -> PredictionOutput:
logger.info("***** Running %s *****", description)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
step: int = 1
for features, labels in dataset:
step = tf.convert_to_tensor(step, dtype=tf.int64)
loss, logits = self._evaluate_steps(features, labels)
loss = tf.reduce_mean(loss)
if not prediction_loss_only:
if self.args.n_gpu > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
step += 1
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["loss"] = loss.numpy()
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def evaluate(
self, eval_dataset: Optional[tf.data.Dataset] = None, prediction_loss_only: Optional[bool] = None
) -> Dict[str, float]:
"""
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
"""
if eval_dataset is None:
eval_dataset = self.eval_dataset
output = self._prediction_loop(eval_dataset, description="Evaluation")
return output.metrics
def train(self) -> None:
"""
Train method to train the model.
"""
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
iterations = self.optimizer.iterations
if iterations.numpy() > 0:
logger.info("Start the training from the last checkpoint")
start_epoch = (iterations.numpy() // self.train_steps) + 1
else:
start_epoch = 1
tf.summary.experimental.set_step(iterations)
epochs = 1 if self.args.max_steps > 0 else self.args.num_train_epochs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
logger.info(" Num Epochs = %d", epochs)
logger.info(" Total optimization steps = %d", self.train_steps)
for epoch in range(start_epoch, int(epochs + 1)):
for training_loss in self._training_steps():
step = iterations.numpy()
if self.args.debug:
with self.writer.as_default():
tf.summary.scalar("loss", training_loss, step=step)
if step == 1 and self.args.debug:
with self.writer.as_default():
tf.summary.trace_export(name="training", step=step, profiler_outdir=self.args.logging_dir)
if self.args.evaluate_during_training and step % self.args.eval_steps == 0:
logs = {}
results = self.evaluate()
for key, value in results.items():
eval_key = "eval_{}".format(key)
logs[eval_key] = value
if callable(self.optimizer.learning_rate):
logs["learning_rate"] = self.optimizer.learning_rate(step).numpy()
else:
logs["learning_rate"] = self.optimizer.learning_rate.numpy()
logger.info("Epoch {} Step {} Validation Metrics {}".format(epoch, step, logs))
with self.writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=step)
if step % self.args.logging_steps == 0:
logger.info("Epoch {} Step {} Train Loss {:.4f}".format(epoch, step, training_loss.numpy()))
if step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(step, ckpt_save_path))
if step % self.train_steps == 0:
break
def _training_steps(self):
"""
Returns a generator over training steps (i.e. parameters update).
"""
for i, loss in enumerate(self._accumulate_next_gradients()):
if i % self.args.gradient_accumulation_steps == 0:
self._apply_gradients()
yield loss
@tf.function
def _apply_gradients(self):
"""Applies the gradients (cross-replica)."""
self.args.strategy.experimental_run_v2(self._step)
def _step(self):
"""Applies gradients and resets accumulation."""
gradient_scale = self.gradient_accumulator.step * self.args.strategy.num_replicas_in_sync
gradients = [
gradient / tf.cast(gradient_scale, gradient.dtype) for gradient in self.gradient_accumulator.gradients
]
gradients = [(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients]
vars = self.model.trainable_variables
if self.args.mode == "token-classification":
vars = [var for var in self.model.trainable_variables if "pooler" not in var.name]
self.optimizer.apply_gradients(list(zip(gradients, vars)))
self.gradient_accumulator.reset()
def _accumulate_next_gradients(self):
"""Accumulates the gradients from the next element in dataset."""
iterator = iter(self.train_dataset)
@tf.function
def _accumulate_next():
per_replica_features, per_replica_labels = next(iterator)
return self._accumulate_gradients(per_replica_features, per_replica_labels)
while True:
try:
yield _accumulate_next()
except tf.errors.OutOfRangeError:
break
def _accumulate_gradients(self, per_replica_features, per_replica_labels):
"""Accumulates the gradients across all the replica."""
per_replica_loss = self.args.strategy.experimental_run_v2(
self._forward, args=(per_replica_features, per_replica_labels)
)
try:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, axis=0)
except ValueError:
reduced_loss = self.args.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_loss, None)
return reduced_loss
def _forward(self, features, labels):
"""Forwards a training example and accumulates the gradients."""
per_example_loss, _ = self._run_model(features, labels, True)
vars = self.model.trainable_variables
if self.args.mode == "token-classification":
vars = [var for var in self.model.trainable_variables if "pooler" not in var.name]
gradients = self.optimizer.get_gradients(per_example_loss, vars)
self.gradient_accumulator(gradients)
return per_example_loss
def _run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Args:
features: the batched features.
labels: the batched labels.
training: run the model in training mode or not
"""
if self.args.mode == "sequence-classification" or self.args.mode == "token-classification":
logits = self.model(features, training=training)[0]
else:
logits = self.model(features, training=training)
if self.args.mode == "token-classification":
active_loss = tf.reshape(labels, (-1,)) != -1
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
loss = self.loss(labels, reduced_logits)
else:
loss = self.loss(labels, logits)
loss += sum(self.model.losses) * (1.0 / self.args.n_gpu)
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in evaluate().
Args:
test_dataset: something similar to a PT Dataset. This is just
temporary until we have a framework-agnostic approach for datasets.
"""
test_dataset = test_dataset.batch(self.args.eval_batch_size)
test_dataset = self.args.strategy.experimental_distribute_dataset(test_dataset)
return self._prediction_loop(test_dataset, description="Prediction")
def save_model(self) -> None:
"""
Save the pretrained model and create a Tensorflow saved model.
"""
logger.info("Saving model in {}".format(self.args.output_dir))
path = os.path.join(self.args.output_dir, "saved_model")
logger.info("Saving model in {}".format(path))
os.makedirs(path, exist_ok=True)
self.model.save_pretrained(self.args.output_dir)
|
py
|
1a5585d406e3698bb25e97c27930b66da802634b
|
class TableFormat:
"""This class handles all related things to the visual presentation of a table."""
def __init__(self):
self._widths = []
self._columns = []
self._rows = []
def set(self, columns):
self._columns = columns
self._widths = [len(column) + 2 for column in columns]
def add_row(self, rows):
rows = [str(row) for row in rows]
self._rows.append(rows)
for index, row in enumerate(rows):
width = len(row) + 2
if width > self._widths[index]:
self._widths[index] = width
def add(self, rows):
for row in rows:
self.add_row(row)
def render(self):
"""Renders a table in rST format for graphical presentation in Discord chat."""
table = '+' + ('+'.join('-' * width for width in self._widths)) + '+'
to_draw = [table]
def get(results):
element = '|'.join(f'{result:^{self._widths[index]}}' for index, result in enumerate(results))
return f'|{element}|'
to_draw.append(get(self._columns))
to_draw.append(table)
for row in self._rows:
to_draw.append(get(row))
to_draw.append(table)
return '\n'.join(to_draw)
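# Illustrative usage (a sketch; the column names and rows are made up):
#   table = TableFormat()
#   table.set(['Name', 'Score'])
#   table.add([['Alice', 3], ['Bob', 7]])
#   table.render()   # -> an rST-style grid table string for a Discord code block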
|
py
|
1a5586d1c5a7f10df37a99f67c54958c3d332138
|
"""Mobjects generated from an SVG file."""
__all__ = ["SVGMobject", "VMobjectFromSVGPathstring", "string_to_numbers"]
import itertools as it
import re
import os
import string
import warnings
import numpy as np
from xml.dom import minidom
from ... import config
from ...constants import *
from ...mobject.geometry import Circle
from ...mobject.geometry import Rectangle
from ...mobject.geometry import RoundedRectangle
from ...mobject.types.vectorized_mobject import VGroup
from ...mobject.types.vectorized_mobject import VMobject
from ...utils.color import *
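# Splits an SVG number string on spaces and commas. Inserting a comma before each "-" lets
# unseparated negative values such as "10-5" split correctly, and the second replace undoes
# that for exponents such as "1e-5".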
def string_to_numbers(num_string):
num_string = num_string.replace("-", ",-")
num_string = num_string.replace("e,-", "e-")
return [float(s) for s in re.split("[ ,]", num_string) if s != ""]
class SVGMobject(VMobject):
"""A SVGMobject is a Vector Mobject constructed from an SVG (or XDV) file.
SVGMobjects are constructed from the XML data within the SVG file
structure. As such, subcomponents from the XML data can be accessed via
the submobjects attribute. There is varying amounts of support for SVG
elements, experiment with SVG files at your own peril.
Examples
--------
.. code-block:: python
class Sample(Scene):
def construct(self):
self.play(
FadeIn(SVGMobject("manim-logo-sidebar.svg"))
)
Parameters
--------
file_name : :class:`str`
The file's path name. When possible, the full path is preferred but a
relative path may be used as well. Relative paths are relative to the
directory specified by the `--assets_dir` command line argument.
Other Parameters
--------
should_center : :class:`bool`
Whether the SVGMobject should be centered to the origin. Defaults to `True`.
height : :class:`float`
Specify the final height of the SVG file. Defaults to 2 units.
width : :class:`float`
Specify the width the SVG file should occupy. Defaults to `None`.
unpack_groups : :class:`bool`
Whether the hierarchies of VGroups generated should be flattened. Defaults to `True`.
stroke_width : :class:`float`
The stroke width of the outer edge of an SVG path element. Defaults to `4`.
fill_opacity : :class:`float`
Specifies the opacity of the image. `1` is opaque, `0` is transparent. Defaults to `1`.
"""
def __init__(
self,
file_name=None,
should_center=True,
height=2,
width=None,
unpack_groups=True, # if False, creates a hierarchy of VGroups
stroke_width=DEFAULT_STROKE_WIDTH,
fill_opacity=1.0,
**kwargs,
):
self.file_name = file_name or self.file_name
self.ensure_valid_file()
self.should_center = should_center
self.height = height
self.width = width
self.unpack_groups = unpack_groups
VMobject.__init__(
self, fill_opacity=fill_opacity, stroke_width=stroke_width, **kwargs
)
self.move_into_position()
def ensure_valid_file(self):
"""Reads self.file_name and determines whether the given input file_name
is valid.
"""
if self.file_name is None:
raise Exception("Must specify file for SVGMobject")
if os.path.exists(self.file_name):
self.file_path = self.file_name
return
relative = os.path.join(os.getcwd(), self.file_name)
if os.path.exists(relative):
self.file_path = relative
return
possible_paths = [
os.path.join(config.get_dir("assets_dir"), self.file_name),
os.path.join(config.get_dir("assets_dir"), self.file_name + ".svg"),
os.path.join(config.get_dir("assets_dir"), self.file_name + ".xdv"),
self.file_name,
self.file_name + ".svg",
self.file_name + ".xdv",
]
for path in possible_paths:
if os.path.exists(path):
self.file_path = path
return
error = f"From: {os.getcwd()}, could not find {self.file_name} at either of these locations: {possible_paths}"
raise IOError(error)
def generate_points(self):
"""Called by the Mobject abstract base class. Responsible for generating
the SVGMobject's points from XML tags, populating self.mobjects, and
any submobjects within self.mobjects.
"""
doc = minidom.parse(self.file_path)
self.ref_to_element = {}
for svg in doc.getElementsByTagName("svg"):
mobjects = self.get_mobjects_from(svg)
if self.unpack_groups:
self.add(*mobjects)
else:
self.add(*mobjects[0].submobjects)
doc.unlink()
def get_mobjects_from(self, element):
"""Parses a given SVG element into a Mobject.
Parameters
----------
element : :class:`str`
The SVG data in the XML to be parsed.
Returns
-------
VMobject
A VMobject representing the associated SVG element.
"""
result = []
if not isinstance(element, minidom.Element):
return result
if element.tagName == "defs":
self.update_ref_to_element(element)
elif element.tagName == "style":
pass # TODO, handle style
elif element.tagName in ["g", "svg", "symbol"]:
result += it.chain(
*[self.get_mobjects_from(child) for child in element.childNodes]
)
elif element.tagName == "path":
temp = element.getAttribute("d")
if temp != "":
result.append(self.path_string_to_mobject(temp))
elif element.tagName == "use":
result += self.use_to_mobjects(element)
elif element.tagName == "rect":
result.append(self.rect_to_mobject(element))
elif element.tagName == "circle":
result.append(self.circle_to_mobject(element))
elif element.tagName == "ellipse":
result.append(self.ellipse_to_mobject(element))
elif element.tagName in ["polygon", "polyline"]:
result.append(self.polygon_to_mobject(element))
else:
pass # TODO
# warnings.warn("Unknown element type: " + element.tagName)
result = [m for m in result if m is not None]
self.handle_transforms(element, VGroup(*result))
if len(result) > 1 and not self.unpack_groups:
result = [VGroup(*result)]
return result
def g_to_mobjects(self, g_element):
"""Converts the ``g`` SVG element into VMobjects.
Parameters
----------
g_element : :class:`str`
A ``g`` element is a group of other SVG elements. As such a ``g`` element is equivalent to a VGroup.
Returns
-------
List[VMobject]
A list of VMobject represented by the group.
"""
mob = VGroup(*self.get_mobjects_from(g_element))
self.handle_transforms(g_element, mob)
return mob.submobjects
def path_string_to_mobject(self, path_string):
"""Converts a SVG path element's ``d`` attribute to a mobject.
Parameters
----------
path_string : str
A path with potentially multiple path commands to create a shape.
Returns
-------
VMobjectFromSVGPathstring
A VMobject from the given path string, or d attribute.
"""
return VMobjectFromSVGPathstring(path_string)
def use_to_mobjects(self, use_element):
"""Converts a SVG <use> element to VMobject.
Parameters
----------
use_element : str
An SVG <use> element which represents nodes that should be
duplicated elsewhere.
Returns
-------
VMobject
A VMobject
"""
# Remove initial "#" character
ref = use_element.getAttribute("xlink:href")[1:]
if ref not in self.ref_to_element:
warnings.warn("%s not recognized" % ref)
return VGroup()
return self.get_mobjects_from(self.ref_to_element[ref])
def attribute_to_float(self, attr):
"""A helper method which converts the attribute to float.
Parameters
----------
attr : str
An SVG path attribute.
Returns
-------
float
A float representing the attribute string value.
"""
stripped_attr = "".join(
[char for char in attr if char in string.digits + "." + "-"]
)
return float(stripped_attr)
def polygon_to_mobject(self, polygon_element):
"""Constructs a VMobject from a SVG <polygon> element.
Parameters
----------
polygon_element : str
An SVG polygon element.
Returns
-------
VMobjectFromSVGPathstring
A VMobject representing the polygon.
"""
# TODO, This seems hacky... yes it is.
path_string = polygon_element.getAttribute("points")
for digit in string.digits:
path_string = path_string.replace(" " + digit, " L" + digit)
path_string = "M" + path_string
return self.path_string_to_mobject(path_string)
# <circle class="st1" cx="143.8" cy="268" r="22.6"/>
def circle_to_mobject(self, circle_element):
"""Creates a Circle VMobject from a SVG <circle> command.
Parameters
----------
circle_element : str
An SVG circle path command.
Returns
-------
Circle
A Circle VMobject
"""
x, y, r = [
self.attribute_to_float(circle_element.getAttribute(key))
if circle_element.hasAttribute(key)
else 0.0
for key in ("cx", "cy", "r")
]
return Circle(radius=r).shift(x * RIGHT + y * DOWN)
def ellipse_to_mobject(self, circle_element):
"""Creates a stretched Circle VMobject from a SVG <circle> path
command.
Parameters
----------
circle_element : str
An SVG circle path command.
Returns
-------
Circle
A Circle VMobject
"""
x, y, rx, ry = [
self.attribute_to_float(circle_element.getAttribute(key))
if circle_element.hasAttribute(key)
else 0.0
for key in ("cx", "cy", "rx", "ry")
]
return Circle().scale(rx * RIGHT + ry * UP).shift(x * RIGHT + y * DOWN)
def rect_to_mobject(self, rect_element):
"""Converts a SVG <rect> command to a VMobject.
Parameters
----------
rect_element : str
An SVG rect path command.
Returns
-------
Rectangle
Creates either a Rectangle or a RoundedRectangle VMobject from a
rect element.
"""
fill_color = rect_element.getAttribute("fill")
stroke_color = rect_element.getAttribute("stroke")
stroke_width = rect_element.getAttribute("stroke-width")
corner_radius = rect_element.getAttribute("rx")
# input preprocessing
if fill_color in ["", "none", "#FFF", "#FFFFFF"] or Color(fill_color) == Color(
WHITE
):
opacity = 0
fill_color = BLACK # shouldn't be necessary but avoids error msgs
if fill_color in ["#000", "#000000"]:
fill_color = WHITE
if stroke_color in ["", "none", "#FFF", "#FFFFFF"] or Color(
stroke_color
) == Color(WHITE):
stroke_width = 0
stroke_color = BLACK
if stroke_color in ["#000", "#000000"]:
stroke_color = WHITE
if stroke_width in ["", "none", "0"]:
stroke_width = 0
if corner_radius in ["", "0", "none"]:
corner_radius = 0
corner_radius = float(corner_radius)
if corner_radius == 0:
mob = Rectangle(
width=self.attribute_to_float(rect_element.getAttribute("width")),
height=self.attribute_to_float(rect_element.getAttribute("height")),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity,
)
else:
mob = RoundedRectangle(
width=self.attribute_to_float(rect_element.getAttribute("width")),
height=self.attribute_to_float(rect_element.getAttribute("height")),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity,
corner_radius=corner_radius,
)
mob.shift(mob.get_center() - mob.get_corner(UP + LEFT))
return mob
def handle_transforms(self, element, mobject):
"""Applies the SVG transform to the specified mobject. Transforms include:
``rotate``, ``translate``, ``scale``, and ``skew``.
Parameters
----------
element : str
The transform command to perform
mobject : Mobject
The Mobject to transform.
"""
x, y = 0, 0
try:
x = self.attribute_to_float(element.getAttribute("x"))
# Flip y
y = -self.attribute_to_float(element.getAttribute("y"))
mobject.shift(x * RIGHT + y * UP)
except:
pass
transform = element.getAttribute("transform")
try: # transform matrix
prefix = "matrix("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix) : -len(suffix)]
transform = string_to_numbers(transform)
transform = np.array(transform).reshape([3, 2])
x = transform[2][0]
y = -transform[2][1]
matrix = np.identity(self.dim)
matrix[:2, :2] = transform[:2, :]
matrix[1] *= -1
matrix[:, 1] *= -1
for mob in mobject.family_members_with_points():
mob.points = np.dot(mob.points, matrix)
mobject.shift(x * RIGHT + y * UP)
except:
pass
try: # transform scale
prefix = "scale("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix) : -len(suffix)]
scale_values = string_to_numbers(transform)
if len(scale_values) == 2:
scale_x, scale_y = scale_values
mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)
elif len(scale_values) == 1:
scale = scale_values[0]
mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
except:
pass
try: # transform translate
prefix = "translate("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(suffix):
raise Exception()
transform = transform[len(prefix) : -len(suffix)]
x, y = string_to_numbers(transform)
mobject.shift(x * RIGHT + y * DOWN)
except:
pass
# TODO, ...
def flatten(self, input_list):
"""A helper method to flatten the ``input_list`` into an 1D array."""
output_list = []
for i in input_list:
if isinstance(i, list):
output_list.extend(self.flatten(i))
else:
output_list.append(i)
return output_list
def get_all_childNodes_have_id(self, element):
"""Gets all child nodes containing the `id` attribute and returns
them in a flattened list.
Parameters
--------
element : :class:`str`
An element from SVG XML data. Elements use a unique `id`.
Returns
-------
List[DOM element]
A flattened list of DOM elements containing the `id` attribute.
"""
all_childNodes_have_id = []
if not isinstance(element, minidom.Element):
return
if element.hasAttribute("id") and element.tagName not in ("g", "defs"):
return [element]
for e in element.childNodes:
all_childNodes_have_id.append(self.get_all_childNodes_have_id(e))
return self.flatten([e for e in all_childNodes_have_id if e])
def update_ref_to_element(self, defs):
"""Updates the ``ref_to_element`` dictionary.
Parameters
--------
defs : :class:`defs`
The new defs
"""
new_refs = dict(
[(e.getAttribute("id"), e) for e in self.get_all_childNodes_have_id(defs)]
)
self.ref_to_element.update(new_refs)
def move_into_position(self):
"""Uses the SVGMobject's config dictionary to set the Mobject's
width, height, and/or center it. Use ``width``, ``height``, and
``should_center`` respectively to modify this.
"""
if self.should_center:
self.center()
if self.height is not None:
self.set_height(self.height)
if self.width is not None:
self.set_width(self.width)
class VMobjectFromSVGPathstring(VMobject):
def __init__(self, path_string, **kwargs):
self.path_string = path_string
VMobject.__init__(self, **kwargs)
def get_path_commands(self):
"""Returns a list of possible path commands used within an SVG ``d``
attribute.
See: https://svgwg.org/svg2-draft/paths.html#DProperty for further
details on what each path command does.
Returns
-------
List[:class:`str`]
The various upper and lower cased path commands.
"""
result = [
"M", # moveto
"L", # lineto
"H", # horizontal lineto
"V", # vertical lineto
"C", # curveto
"S", # smooth curveto
"Q", # quadratic Bezier curve
"T", # smooth quadratic Bezier curveto
"A", # elliptical Arc
"Z", # closepath
]
result += [s.lower() for s in result]
return result
def generate_points(self):
"""Generates points from a given an SVG ``d`` attribute."""
pattern = "[%s]" % ("".join(self.get_path_commands()))
pairs = list(
zip(
re.findall(pattern, self.path_string),
re.split(pattern, self.path_string)[1:],
)
)
# Which mobject should new points be added to
self = self
for command, coord_string in pairs:
self.handle_command(command, coord_string)
# people treat y-coordinate differently
self.rotate(np.pi, RIGHT, about_point=ORIGIN)
def handle_command(self, command, coord_string):
"""Core logic for handling each of the various path commands."""
isLower = command.islower()
command = command.upper()
# new_points are the points that will be added to the curr_points
# list. This variable may get modified in the conditionals below.
points = self.points
new_points = self.string_to_points(coord_string)
if isLower and len(points) > 0:
new_points += points[-1]
if command == "M": # moveto
self.start_new_path(new_points[0])
if len(new_points) <= 1:
return
# Draw relative line-to values.
points = self.points
new_points = new_points[1:]
command = "L"
for p in new_points:
if isLower:
# Treat everything as relative line-to until empty
p[0] += self.points[-1, 0]
p[1] += self.points[-1, 1]
self.add_line_to(p)
return
elif command in ["L", "H", "V"]: # lineto
if command == "H":
new_points[0, 1] = points[-1, 1]
elif command == "V":
if isLower:
new_points[0, 0] -= points[-1, 0]
new_points[0, 0] += points[-1, 1]
new_points[0, 1] = new_points[0, 0]
new_points[0, 0] = points[-1, 0]
self.add_line_to(new_points[0])
return
if command == "C": # curveto
pass # Yay! No action required
elif command in ["S", "T"]: # smooth curveto
self.add_smooth_curve_to(*new_points)
# handle1 = points[-1] + (points[-1] - points[-2])
# new_points = np.append([handle1], new_points, axis=0)
return
elif command == "Q": # quadratic Bezier curve
# TODO, this is a suboptimal approximation
new_points = np.append([new_points[0]], new_points, axis=0)
elif command == "A": # elliptical Arc
raise NotImplementedError()
elif command == "Z": # closepath
return
# Add first three points
self.add_cubic_bezier_curve_to(*new_points[0:3])
# Handle situations where there's multiple relative control points
if len(new_points) > 3:
# Add subsequent offset points relatively.
for i in range(3, len(new_points), 3):
if isLower:
new_points[i : i + 3] -= points[-1]
new_points[i : i + 3] += new_points[i - 1]
self.add_cubic_bezier_curve_to(*new_points[i : i + 3])
def string_to_points(self, coord_string):
"""Since the SVG file's path command is provided as a string, this
converts the coordinates into numbers.
"""
numbers = string_to_numbers(coord_string)
if len(numbers) % 2 == 1:
numbers.append(0)
num_points = len(numbers) // 2
result = np.zeros((num_points, self.dim))
result[:, :2] = np.array(numbers).reshape((num_points, 2))
return result
def get_original_path_string(self):
"""A simple getter for the path's ``d`` attribute."""
return self.path_string
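# --- Hedged usage sketch (not part of the original module) ---
# ``generate_points`` above pairs each SVG path command with its coordinate
# string using one regex. The standalone demo below reproduces just that
# pairing step; the path string is a made-up example, not taken from any
# particular SVG file.
def _demo_split_path_string(path_string="M 0 0 L 10 0 C 10 10 0 10 0 0 Z"):
    import re
    commands = "MLHVCSQTAZ"
    pattern = "[%s]" % (commands + commands.lower())
    # e.g. [('M', ' 0 0 '), ('L', ' 10 0 '), ('C', ' 10 10 0 10 0 0 '), ('Z', '')]
    return list(
        zip(
            re.findall(pattern, path_string),
            re.split(pattern, path_string)[1:],
        )
    )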
|
py
|
1a5586ed2a68db2a2541b980193105d77c0957f7
|
# coding=utf-8
import kNNImage
'''
Read handwritten-digit txt files and classify which digits they represent.
'''
kNNImage.handWritingClassify()
|
py
|
1a55871e98e1e0c46b77e1d0f969e2c38423efaf
|
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-1d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-1d/tmp_files/8733.c')
procedure('kernel_jacobi_1d')
loop(0)
known(' n > 2 ')
tile(0,2,16,2)
tile(1,2,64,2)
|
py
|
1a55878182214db9af28b16a925071cd8590d067
|
from api.models import Demand
from django.db.models import Count
def create(**kwargs):
return Demand.objects.create(**kwargs)
def get_demand(**kwargs):
return Demand.objects.filter(**kwargs).first()
def get_demands(**kwargs):
return Demand.objects.filter(**kwargs).all()
def demand_count(user):
return Demand.objects.all().annotate(demand_count=Count("user"))
|
py
|
1a55880fa577acfcc311db0643b630915a3f547f
|
from actors.components.component import Component
from functools import reduce
class Components(Component):
def __init__(self, components=frozenset()):
self.components = components
def update(self):
components = self
for component in self.components:
updated_component = component.update()
if component is not updated_component:
components = components.replace(component, updated_component)
return components
def attempt(self, action, actor, root, *args):
handler = next((getattr(c, action) for c in self.components if hasattr(c, action)), self.__swallow_attempt)
return handler(actor, root, *args)
def replace(self, old, new):
return Components(self.components - frozenset([old]) | frozenset([new]))
def print_to(self, x, y, media):
return reduce(lambda m, c: c.print_to(x, y, media), self.components, media)
def __swallow_attempt(self, actor, root, *args):
return root
|
py
|
1a5588afd5bee18eaa7d0363cf5b087c58d92387
|
# Converts an RSP file intended to MSVC linker into one argument per line.
import shlex
import sys
if len(sys.argv) != 2:
print sys.argv[0] + ': rsp_file'
sys.exit(1)
with open(sys.argv[1]) as f:
lines = [arg + '\n' for arg in shlex.split(f.read(), posix=False)]
with open(sys.argv[1], 'w') as f:
f.writelines(lines)
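# --- Hedged illustration (not part of the original script) ---
# shlex.split with posix=False keeps double quotes attached to their token,
# which matches MSVC response-file conventions. A made-up example:
#
#   shlex.split('foo.obj "bar baz.obj" /OUT:app.exe', posix=False)
#   -> ['foo.obj', '"bar baz.obj"', '/OUT:app.exe']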
|
py
|
1a558904a04f9b45cd83be5830031de7e3fa5dc0
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organisations', '0002_auto_20160716_1939'),
]
operations = [
migrations.AddField(
model_name='organisation',
name='address_map_url',
field=models.URLField(null=True, blank=True),
),
migrations.AddField(
model_name='organisation',
name='full_address',
field=models.TextField(null=True, blank=True),
),
migrations.AddField(
model_name='organisation',
name='pincode',
field=models.CharField(max_length=6, null=True, blank=True),
),
]
|
py
|
1a5589312fb31d9b6abf478dc23dc4ea7e9998be
|
import logging
import datetime
from ipyc import AsyncIPyCHost, AsyncIPyCLink
host = AsyncIPyCHost()
# logging.basicConfig(level=logging.DEBUG)
@host.on_connect
async def on_connection(connection: AsyncIPyCLink):
connection_idx = len(host.connections)
print(f'We got a new connection! ({connection_idx})')
while connection.is_active():
message = await connection.receive()
if message:
print(f"[{datetime.datetime.now()}] - Connection {connection_idx} says: {message}")
print(f"[{datetime.datetime.now()}] - Connection {connection_idx} was closed!")
print('Starting to wait for connections!')
host.run()
|
py
|
1a55896a1af3123f808340bc16f14d99f7bfc586
|
import pathlib
import os
import shutil
from flask import Flask
import logging
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
import numpy as np
import plotly.graph_objs as go
import roslibpy
import time
from dash.dependencies import State, Input, Output
import pointcloud_msg
server = Flask(__name__)
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = dash.Dash(
__name__, external_stylesheets=[dbc.themes.SLATE],
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1.0"}
],
title="Open AgroLiDAR Control",
server=server
)
# This is for gunicorn
#server = app.server
# Mapbox
MAPBOX_ACCESS_TOKEN = "pk.eyJ1IjoiamFqYmVybmkiLCJhIjoiY2oyMXFsZjdsMDAxNTJybzd0bDNxczZyeCJ9.6EKxvkWLdnzNI0RJLAsimA"
MAPBOX_STYLE = "mapbox://styles/jajberni/ckk35n9qg3uvw17qg3du25oyb"
GPS_FIX_COLORS = {6: "#67d03b", 5: "#f9f025", 4: "#f97654", 3: "#f97654", 2: "#f43c4e", 1: "#f43c4e", 0: "#f43c4e",
-1: "#f43c4e"}
DEFAULT_FOLDER = "/data"
class StatusManager:
"""Class to store information useful to callbacks"""
def __init__(self):
self.is_connected = False
self.scan_count = 0
self.lat = 0.0
self.lon = 0.0
self.h_accuracy = 0.0
self.v_accuracy = 0.0
self.gps_status = -1
self.lat_path = []
self.lon_path = []
self.fix_path = []
self.speed_kph = 0.0
self.last_cloud = None
self.last_gps = None
self.last_pose = None
self.last_sat_count = 0
self.rtk_listener = None
self.cloud_listener = None
self.pose_listener = None
self.project_talker = None
self.project_service = None
self.project_list = []
ros_master = '127.0.0.1'
if "ROS_MASTER_HOSTNAME" in os.environ:
ros_master = os.environ["ROS_MASTER_HOSTNAME"]
self.client = roslibpy.Ros(host=ros_master, port=9090)
self.connect()
def connect(self):
try:
self.client.run(timeout=50)
self.is_connected = True
self.create_listeners()
except Exception as ex:
self.is_connected = False
print("Error connecting to ROS")
def rtk_callback(self, msg):
self.last_gps = msg
self.lat = msg['lat'] /1e7
self.lon = msg['lon'] / 1e7
self.h_accuracy = msg['h_acc']/1e3
self.v_accuracy = msg['v_acc']/1e3
self.gps_status = msg['fix_type']
self.lat_path.append(self.lat)
self.lon_path.append(self.lon)
self.fix_path.append(self.gps_status)
self.speed_kph = msg['vel']*0.036
self.last_sat_count = msg['satellites_visible']
#print(msg['cog']/100, msg['fix_type'], msg['dgps_age'])
def cloud_callback(self, msg):
self.scan_count += 1
self.last_cloud = pointcloud_msg.msg_to_cloud(msg)
def pose_callback(self, msg):
self.last_pose = msg
def start_recording(self, project_name):
if self.is_connected:
self.project_talker.publish(roslibpy.Message({'data': project_name}))
def stop_recording(self):
if self.is_connected:
self.project_talker.publish(roslibpy.Message({'data': '.'}))
def create_listeners(self):
if self.is_connected:
self.project_talker = roslibpy.Topic(self.client, '/project/name', 'std_msgs/String')
self.rtk_listener = roslibpy.Topic(self.client, '/mavros/gpsstatus/gps1/raw', 'mavros_msgs/GPSRAW')
self.rtk_listener.subscribe(self.rtk_callback)
self.cloud_listener = roslibpy.Topic(self.client, '/laserMapping/laser_points', 'sensor_msgs/PointCloud2')
self.cloud_listener.subscribe(self.cloud_callback)
self.pose_listener = roslibpy.Topic(self.client, '/mavros/global_position/local', 'nav_msgs/Odometry')
self.pose_listener.subscribe(self.pose_callback)
self.project_service = roslibpy.Service(self.client, '/project_service', 'agrolaser_node/ProjectService')
project_request = roslibpy.ServiceRequest({'request_string': 'list', 'project': ''})
resp = self.project_service.call(project_request)
if len(resp['list_strings']) == 0:
self.project_list = [{'label': 'Test', 'value': 'Test'}]
else:
self.project_list = [{'label': project_name, 'value': project_name} for project_name in sorted(resp['list_strings'])]
local_vars = StatusManager()
# Point Cloud graph components
# Helix equation for demo
t = np.linspace(0, 10, 50)
x, y, z = np.cos(t), np.sin(t), t
default_fig = go.Figure(data=[go.Scatter3d(x=x, y=y, z=z,
mode='markers',
marker=dict(
size=1,
opacity=0.8
))])
default_fig.update_layout(hovermode=False)
default_fig.update_layout(margin=dict(l=0, r=0, b=0, t=0))
default_fig.update_scenes(aspectmode='manual', aspectratio_z=0.1)
cloud_graph_card = dbc.Card(
dcc.Graph(id='point-cloud-graph', figure=default_fig, config={
'displayModeBar': False,
}), body=True
)
update_button = dbc.Button(
"Clear Point Cloud", id="update", n_clicks=0, color="primary", outline=True
)
update_button_2 = dbc.Button(
"Force Update", id="update-2", n_clicks=0, color="primary", outline=True
)
setup_button = html.Div(
[
dbc.Button("Options", id="open-setup"),
dbc.Modal(
[
dbc.ModalHeader("Options"),
dbc.ModalBody("This is the content of the modal"),
dbc.ModalFooter(
dbc.Button("Close", id="close-setup", className="ml-auto")
),
],
id="setup-modal",
),
]
)
new_project_button = html.Div(
[
dbc.Button("New Project", id="new-project-button"),
dbc.Modal(
[dbc.Row(
dbc.Col(dbc.FormGroup(
[
dbc.Label("Project Name", className="mr-2"),
dbc.Input(id="input-new-project-name", type="text", placeholder="Enter project"),
],
className="mr-3",
)), form=True),
dbc.Row(
dbc.Col(dbc.FormGroup(
[
dbc.Label("Description", className="mr-2"),
dbc.Input(type="text", placeholder="Enter description"),
],
className="mr-3",
)), form=True),
dbc.Row([
dbc.Col(dbc.Button("Accept", color="primary", id="accept-project-button")),
dbc.Col(dbc.Button("Cancel", color="primary", id="cancel-project-button")),
],
)
],
id="new-project-modal",
),
]
)
# Dash_DAQ elements
utc = html.Div(
id="control-panel-utc",
children=[
daq.LEDDisplay(
id="control-panel-utc-component",
value="16:23",
label="Time",
size=40,
color="#fec036",
backgroundColor="#2b2b2b",
)
],
n_clicks=0,
)
speed = daq.Gauge(
id="control-panel-speed-component",
label="Speed",
min=0,
max=10,
showCurrentValue=True,
value=4.0,
size=175,
units="km/h",
color="#fec036",
)
scan_count = daq.LEDDisplay(
id="control-panel-scans-component",
value="0000000",
label="Scans",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
storage_indicator = html.Div(
id="control-panel-disk",
children=[
daq.GraduatedBar(
id="control-panel-disk-component",
label="Disk Capacity",
min=0,
max=100,
value=76,
step=1,
showCurrentValue=True,
color="#fec036",
)
],
n_clicks=0,
)
battery_indicator = html.Div(
id="control-panel-battery",
children=[
daq.GraduatedBar(
id="control-panel-battery-component",
label="Battery-Level",
min=0,
max=100,
value=85,
step=1,
showCurrentValue=True,
color="#fec036",
)
],
n_clicks=0,
)
longitude = daq.LEDDisplay(
id="control-panel-longitude-component",
value="0000.0000",
label="Longitude",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
latitude = daq.LEDDisplay(
id="control-panel-latitude-component",
value="0050.9789",
label="Latitude",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
h_accuracy = daq.LEDDisplay(
id="control-panel-h-accuracy-component",
value="0.0000",
label="H Accuracy (m)",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
v_accuracy = daq.LEDDisplay(
id="control-panel-v-accuracy-component",
value="0.0000",
label="V Accuracy (m)",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
satellites = html.Div([
dbc.Row([
dbc.Col(
daq.LEDDisplay(
id="satellite-count",
value="00",
label="Satellites",
size=24,
color="#fec036",
style={"color": "#black"},
backgroundColor="#2b2b2b",
)
),
dbc.Col(
daq.Indicator(
id="rtk-indicator",
label="RTK Status",
labelPosition="bottom",
value=True,
color="#15e82e",
style={"color": "#black"},
)
),
], no_gutters=True, align="center")
])
gps_card = dbc.Card([
satellites,
dbc.Row([
dbc.Col([
latitude,
longitude]),
dbc.Col([
h_accuracy,
v_accuracy]),
])
])
map_toggle = daq.ToggleSwitch(
id="control-panel-toggle-map",
value=True,
label=["Hide path", "Show path"],
color="#ffe102",
style={"color": "#black"},
)
# Side panel
project_dropdown_text = html.P(
id="project-dropdown-text", children=["Control"]
)
"""project_dropdown = dbc.FormGroup(
[
dbc.Label("Camera Position"),
dbc.Select(
id="project",
options=[
{"label": "New Project...", "value": "new_project"},
{"label": "Project 1", "value": "project_1"},
{"label": "Project 2", "value": "project_2"},
],
value="project_1",
),
]
)
"""
project_select = dbc.InputGroup(
[
dbc.InputGroupAddon("Select Project", addon_type="prepend"),
dbc.Select(
id="project-dropdown-component",
options=local_vars.project_list,
value=local_vars.project_list[0]['value']
),
dbc.InputGroupAddon(
new_project_button,
addon_type="append",
),
]
)
project_title = html.H1(id="project-name", children="")
recording_button = daq.PowerButton(
id='recording-button',
on=False,
color='#FF5E5E',
size=80,
label='Record',
labelPosition='top'
)
project_body = html.P(
className="project-description", id="project-description", children=[""]
)
side_panel_layout = html.Div(
id="panel-side",
children=[
dbc.Card([
dbc.Row([
dbc.Col(project_select),
]),
dbc.Row([
dbc.Col(recording_button),
]),
dbc.Row([
dbc.Col(update_button),
dbc.Col(update_button_2),
dbc.Col(setup_button),
])
]),
],
)
# project location tracker
# Helper to straighten lines on the map
def flatten_path(xy1, xy2):
diff_rate = (xy2 - xy1) / 100
res_list = []
for i in range(100):
res_list.append(xy1 + i * diff_rate)
return res_list
map_data = [
{
"type": "scattermapbox",
"lat": [0],
"lon": [0],
"hoverinfo": "text+lon+lat",
"text": "LiDAR Path",
"mode": "lines",
"line": {"width": 3, "color": "#126de3"},
},
{
"type": "scattermapbox",
"lat": [0],
"lon": [0],
"hoverinfo": "text+lon+lat",
"text": "Current Position",
"mode": "markers",
"marker": {"size": 10, "color": "#fec036"},
},
]
map_layout = {
"mapbox": {
"accesstoken": MAPBOX_ACCESS_TOKEN,
"style": MAPBOX_STYLE,
"center": {"lat": 37.8, "lon": -4.8}, "zoom": 18,
},
"showlegend": False,
"autosize": True,
"paper_bgcolor": "#1e1e1e",
"plot_bgcolor": "#1e1e1e",
"margin": {"t": 0, "r": 0, "b": 0, "l": 0},
}
map_graph = dbc.Card(
id="world-map-wrapper",
children=[
map_toggle,
dcc.Graph(
id="world-map",
figure={"data": map_data, "layout": map_layout},
config={"displayModeBar": False, "scrollZoom": True},
)
],
body=True
)
main_panel_card = html.Div([
dcc.Interval(id="interval", interval=1 * 2000, n_intervals=0),
dcc.Interval(id="interval-fast", interval=500, n_intervals=0),
dbc.Card([
dbc.Row([
dbc.Col(speed, width=3),
dbc.Col([dbc.Row(utc), dbc.Row(scan_count), dbc.Row(storage_indicator)], width=3),
dbc.Col(gps_card, width=5)
]
),
]),
dbc.Card(dbc.Row([dbc.Col(cloud_graph_card, width=6), dbc.Col(map_graph, width=6)]))
])
# Data generation
# Pandas
APP_PATH = str(pathlib.Path(__file__).parent.resolve())
# Root
root_layout = dbc.Container(
[
dcc.Store(id="store-placeholder"),
dcc.Store(
id="store-data"),
html.H1("Open PhenoLiDAR Control"),
html.Hr(),
dbc.Row([
dbc.Col(main_panel_card, md=8),
dbc.Col(side_panel_layout, md=4),
], align="start")
], fluid=True,
)
app.layout = root_layout
# Callback: free disk space
@app.callback(
Output("control-panel-disk-component", "value"), [Input("interval", "n_intervals")]
)
def update_free_disk(interval):
total, used, free = shutil.disk_usage("/")
    used_pc = 100 * used / total  # percentage of the disk already in use
    return used_pc
# Callbacks Data
# Callbacks Components
@app.callback(
Output("control-panel-utc-component", "value"), [Input("interval", "n_intervals")]
)
def update_time(interval):
hour = time.localtime(time.time())[3]
hour = str(hour).zfill(2)
minute = time.localtime(time.time())[4]
minute = str(minute).zfill(2)
return hour + ":" + minute
@app.callback(
[
Output("control-panel-latitude-component", "value"),
Output("control-panel-longitude-component", "value"),
Output("control-panel-h-accuracy-component", "value"),
Output("control-panel-v-accuracy-component", "value"),
Output("control-panel-scans-component", "value"),
Output("satellite-count", "value"),
Output("rtk-indicator", "value"),
Output("rtk-indicator", "color"),
],
[Input("interval", "n_intervals")],
)
def update_gps_component(clicks):
rtk_status = False
rtk_color = "#fec036"
if local_vars.gps_status > 5:
rtk_status = True
if local_vars.gps_status < 3:
rtk_color = "#dc1330"
elif local_vars.gps_status == 5:
rtk_color = "#f9f025"
elif local_vars.gps_status == 6:
rtk_color = "#6bd71f"
else:
rtk_color = "#dc1330"
return "{:.4f}".format(local_vars.lat), "{:.4f}".format(
local_vars.lon), "{:.3f}".format(local_vars.h_accuracy), "{:.3f}".format(
local_vars.v_accuracy), "{:08d}".format(local_vars.scan_count), local_vars.last_sat_count, rtk_status, rtk_color
@app.callback(Output("control-panel-speed-component", "value"),
[Input("interval-fast", "n_intervals")],
)
def update_speed_component(clicks):
return local_vars.speed_kph
@app.callback(
Output("point-cloud-graph", "figure"),
[Input("update", "n_clicks"), ],
[State("point-cloud-graph", "figure")]
)
def create_cloud_graph(clicks, graph_data):
if local_vars.last_cloud is not None:
# print(graph_data)
# print(local_vars.last_cloud.points.head())
df = local_vars.last_cloud
graph_data['data'] = [
go.Scatter3d(
x=df['x'],
y=df['y'],
z=df['z'],
mode='markers',
marker=dict(
size=1,
color=df['intensity'],
opacity=0.8
)
)
]
else:
print("No data")
return graph_data
@app.callback(
Output("point-cloud-graph", "extendData"),
[Input("interval", "n_intervals"), Input("update-2", "n_clicks"), ],
[State("point-cloud-graph", "figure")]
)
def update_cloud_graph(interval, clicks, graph_data):
# print(graph_data['data'])
if local_vars.last_cloud is not None:
df = local_vars.last_cloud
data = [go.Scatter3d(
x=df['x'],
y=df['y'],
z=df['z'],
mode='markers',
marker=dict(
size=1,
color=df['intensity'],
opacity=0.8
)
)]
# print(data[0]['marker'])
if graph_data is None:
return
if len(graph_data['data']) > 0:
# return data[0], [0]
return dict(x=[data[0]['x']], y=[data[0]['y']], z=[data[0]['z']]), [
0] # , marker=dict(color=[data[0]['marker']['color']])), [0]
# return data
@app.callback(
Output("world-map", "figure"),
[
Input("interval", "n_intervals"),
Input("control-panel-toggle-map", "value"),
],
[
State("world-map", "figure"),
State("store-data", "data"),
],
)
def update_world_map(clicks, toggle, old_figure, data):
figure = old_figure
figure["data"][1]["lat"] = [local_vars.lat]
figure["data"][1]["lon"] = [local_vars.lon]
figure["data"][1]["marker"]["color"] = GPS_FIX_COLORS[local_vars.gps_status]
figure["layout"]["mapbox"]["center"] = {"lat": local_vars.lat, "lon": local_vars.lon}
if not toggle:
figure["data"][0]["lat"] = []
figure["data"][0]["lon"] = []
else:
figure["data"][0]["lat"] = local_vars.lat_path
figure["data"][0]["lon"] = local_vars.lon_path
return figure
@app.callback(
[Output("project-dropdown-component", "disabled"), Output("new-project-button", "disabled")],
[
Input("recording-button", "on"),
Input("project-dropdown-component", "value"),
],
)
def recording_control(on, project):
if project != 'new_project':
if on:
print("Start record: " + project)
local_vars.start_recording(project)
else:
local_vars.stop_recording()
return on, on
@app.callback(
Output("setup-modal", "is_open"),
[Input("open-setup", "n_clicks"), Input("close-setup", "n_clicks")],
[State("setup-modal", "is_open")],
)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
[Output("new-project-modal", "is_open"), Output("project-dropdown-component", "options")],
[Input("new-project-button", "n_clicks"), Input("accept-project-button", "n_clicks"),
Input("cancel-project-button", "n_clicks"), Input("input-new-project-name", "value")],
[State("new-project-modal", "is_open"), State("accept-project-button", "n_clicks"), State("cancel-project-button", "n_clicks")],
)
def toggle_new_project_modal(n1, n2, n3, new_project_name, is_open, n2_s, n3_s):
if n1 is None:
return is_open, local_vars.project_list
if n2 == n1:
print("Create new project: " + new_project_name)
resp = local_vars.project_service.call(roslibpy.ServiceRequest({'request_string': 'create', 'project': new_project_name}))
project_request = roslibpy.ServiceRequest({'request_string': 'list', 'project': ''})
local_vars.project_list = [{'label': project_name, 'value': project_name} for project_name in
sorted(local_vars.project_service.call(project_request)['list_strings'])]
return False, local_vars.project_list
if n3 == n1:
return False, local_vars.project_list
if n1:
return True, local_vars.project_list
return is_open, local_vars.project_list
if __name__ == "__main__":
debug = True
port = 8051
if "DASH_DEBUG_MODE" in os.environ:
debug = False if os.environ["DASH_DEBUG_MODE"] == "False" else True
if "DASH_PORT" in os.environ:
port = os.environ["DASH_PORT"]
app.run_server(host="0.0.0.0", port=port, debug=debug)
|
py
|
1a558b2caf88a1ebde222167aee0fe867c6fc1a2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConnectToSourceMySqlTaskInput(Model):
"""Input for the task that validates MySQL database connection.
All required parameters must be populated in order to send to Azure.
:param source_connection_info: Required. Information for connecting to
MySQL source
:type source_connection_info:
~azure.mgmt.datamigration.models.MySqlConnectionInfo
:param target_platform: Target Platform for the migration. Possible values
include: 'AzureDbForMySQL'
:type target_platform: str or
~azure.mgmt.datamigration.models.MySqlTargetPlatformType
:param check_permissions_group: Permission group for validations. Possible
values include: 'Default', 'MigrationFromSqlServerToAzureDB',
'MigrationFromSqlServerToAzureMI', 'MigrationFromMySQLToAzureDBForMySQL'
:type check_permissions_group: str or
~azure.mgmt.datamigration.models.ServerLevelPermissionsGroup
"""
_validation = {
'source_connection_info': {'required': True},
}
_attribute_map = {
'source_connection_info': {'key': 'sourceConnectionInfo', 'type': 'MySqlConnectionInfo'},
'target_platform': {'key': 'targetPlatform', 'type': 'str'},
'check_permissions_group': {'key': 'checkPermissionsGroup', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ConnectToSourceMySqlTaskInput, self).__init__(**kwargs)
self.source_connection_info = kwargs.get('source_connection_info', None)
self.target_platform = kwargs.get('target_platform', None)
self.check_permissions_group = kwargs.get('check_permissions_group', None)
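# --- Hedged usage sketch (not part of the generated client) ---
# A minimal illustration of populating this model. ``source_info`` stands in
# for a MySqlConnectionInfo instance built elsewhere (its constructor
# arguments are not shown here); the string values come from the allowed
# values listed in the docstring above.
def _example_build_task_input(source_info):
    return ConnectToSourceMySqlTaskInput(
        source_connection_info=source_info,
        target_platform='AzureDbForMySQL',
        check_permissions_group='Default',
    )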
|
py
|
1a558c8763a8bf138b6ce243f02c18b5324f2035
|
from mavetools.validators import dataset_validators
def validate_all(countfile=None, scorefile=None, scorejson=None):
"""
By calling other helper functions, this function runs all of the validation code
"""
validate_dataset(countfile, scorefile, scorejson)
def validate_dataset(countfile=None, scorefile=None, scorejson=None):
"""
This function calls all of the validation functions within
    mavetools/mavetools/validators/dataset_validators.py

    Returns
    -------
    None
    """
# how to incorporate word limit validator?
if scorefile is not None:
# open scorefile
open(scorefile)
# this one returns header
scoreheader = dataset_validators.read_header_from_io(file=scorefile)
# if the header was returned, do these ones
dataset_validators.validate_has_hgvs_in_header(header=scoreheader)
dataset_validators.validate_at_least_one_additional_column(header=scoreheader)
dataset_validators.validate_header_contains_no_null_columns(header=scoreheader)
dataset_validators.validate_scoreset_score_data_input(file=scorefile)
if scorejson is not None:
# open scorejson
open(scorejson)
dataset_validators.validate_scoreset_json(dict_=scorejson)
if countfile is not None:
# open countfile
open(countfile)
countheader = dataset_validators.read_header_from_io(file=countfile)
# if the header was returned, do these ones
dataset_validators.validate_has_hgvs_in_header(header=countheader)
dataset_validators.validate_at_least_one_additional_column(header=countheader)
dataset_validators.validate_header_contains_no_null_columns(header=countheader)
dataset_validators.validate_scoreset_count_data_input(file=countfile)
if scorefile is not None and countfile is not None:
dataset_validators.validate_datasets_define_same_variants(
scores=scorefile, counts=countfile
)
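# --- Hedged usage sketch (not part of the original module) ---
# How this wrapper might be invoked; the file names below are hypothetical
# stand-ins for a MaveDB-style score file and count file.
#
#   validate_all(countfile="counts.csv", scorefile="scores.csv")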
|
py
|
1a558de7f88a73ac33572a81197f78245d5bb3a3
|
# coding: utf-8
"""
Spinnaker API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class HttpEntity(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'body': 'object'
}
attribute_map = {
'body': 'body'
}
def __init__(self, body=None): # noqa: E501
"""HttpEntity - a model defined in Swagger""" # noqa: E501
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this HttpEntity. # noqa: E501
:return: The body of this HttpEntity. # noqa: E501
:rtype: object
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this HttpEntity.
:param body: The body of this HttpEntity. # noqa: E501
:type: object
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(HttpEntity, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, HttpEntity):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
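# --- Hedged usage sketch (not part of the generated client) ---
def _example_http_entity_round_trip():
    """Minimal sketch showing to_dict()/to_str() on a simple body."""
    entity = HttpEntity(body={"message": "hello"})
    as_dict = entity.to_dict()  # {'body': {'message': 'hello'}}
    as_text = entity.to_str()   # pretty-printed representation of the dict
    return as_dict, as_text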
|
py
|
1a558e4eea28f2dec2a481faba8b6d7233186cf9
|
import shutil
import pytest
import yaml
from click.testing import CliRunner
from kedro.extras.datasets.pandas import CSVDataSet
from kedro.io import DataCatalog, MemoryDataSet
from kedro.pipeline import Pipeline, node
@pytest.fixture
def fake_load_context(mocker):
context = mocker.MagicMock()
return mocker.patch(
"kedro.framework.session.KedroSession.load_context", return_value=context
)
PIPELINE_NAME = "pipeline"
@pytest.fixture
def mock_pipelines(mocker):
dummy_pipelines = {PIPELINE_NAME: Pipeline([]), "second": Pipeline([])}
return mocker.patch("kedro.framework.cli.catalog.pipelines", dummy_pipelines)
@pytest.mark.usefixtures(
"chdir_to_dummy_project", "fake_load_context", "mock_pipelines"
)
class TestCatalogListCommand:
def test_list_all_pipelines(self, fake_project_cli, fake_metadata, mocker):
yaml_dump_mock = mocker.patch("yaml.dump", return_value="Result YAML")
result = CliRunner().invoke(
fake_project_cli, ["catalog", "list"], obj=fake_metadata
)
assert not result.exit_code
expected_dict = {
"DataSets in 'pipeline' pipeline": {},
"DataSets in 'second' pipeline": {},
}
yaml_dump_mock.assert_called_once_with(expected_dict)
def test_list_specific_pipelines(self, fake_project_cli, fake_metadata, mocker):
yaml_dump_mock = mocker.patch("yaml.dump", return_value="Result YAML")
result = CliRunner().invoke(
fake_project_cli,
["catalog", "list", "--pipeline", PIPELINE_NAME],
obj=fake_metadata,
)
assert not result.exit_code
expected_dict = {f"DataSets in '{PIPELINE_NAME}' pipeline": {}}
yaml_dump_mock.assert_called_once_with(expected_dict)
def test_not_found_pipeline(self, fake_project_cli, fake_metadata):
result = CliRunner().invoke(
fake_project_cli,
["catalog", "list", "--pipeline", "fake"],
obj=fake_metadata,
)
assert result.exit_code
expected_output = (
"Error: `fake` pipeline not found! Existing pipelines: pipeline, second"
)
assert expected_output in result.output
def test_no_param_datasets_in_respose(
self, fake_project_cli, fake_metadata, fake_load_context, mocker, mock_pipelines
):
yaml_dump_mock = mocker.patch("yaml.dump", return_value="Result YAML")
mocked_context = fake_load_context.return_value
catalog_data_sets = {
"iris_data": CSVDataSet("test.csv"),
"intermediate": MemoryDataSet(),
"parameters": MemoryDataSet(),
"params:data_ratio": MemoryDataSet(),
"not_used": CSVDataSet("test2.csv"),
}
mocked_context.catalog = DataCatalog(data_sets=catalog_data_sets)
mocker.patch.object(
mock_pipelines[PIPELINE_NAME],
"data_sets",
return_value=catalog_data_sets.keys() - {"not_used"},
)
result = CliRunner().invoke(
fake_project_cli,
["catalog", "list"],
obj=fake_metadata,
)
assert not result.exit_code
# 'parameters' and 'params:data_ratio' should not appear in the response
expected_dict = {
f"DataSets in '{PIPELINE_NAME}' pipeline": {
"Datasets mentioned in pipeline": {
"CSVDataSet": ["iris_data"],
"MemoryDataSet": ["intermediate"],
},
"Datasets not mentioned in pipeline": {"CSVDataSet": ["not_used"]},
}
}
key = f"DataSets in '{PIPELINE_NAME}' pipeline"
assert yaml_dump_mock.call_count == 1
assert yaml_dump_mock.call_args[0][0][key] == expected_dict[key]
def test_default_dataset(
self, fake_project_cli, fake_metadata, fake_load_context, mocker, mock_pipelines
):
"""Test that datasets that are found in `Pipeline.data_sets()`,
but not in the catalog, are outputted under the key "DefaultDataset".
"""
yaml_dump_mock = mocker.patch("yaml.dump", return_value="Result YAML")
mocked_context = fake_load_context.return_value
catalog_data_sets = {"some_dataset": CSVDataSet("test.csv")}
mocked_context.catalog = DataCatalog(data_sets=catalog_data_sets)
mocker.patch.object(
mock_pipelines[PIPELINE_NAME],
"data_sets",
return_value=catalog_data_sets.keys() | {"intermediate"},
)
result = CliRunner().invoke(
fake_project_cli,
["catalog", "list"],
obj=fake_metadata,
)
assert not result.exit_code
expected_dict = {
f"DataSets in '{PIPELINE_NAME}' pipeline": {
"Datasets mentioned in pipeline": {
"CSVDataSet": ["some_dataset"],
"DefaultDataSet": ["intermediate"],
}
}
}
key = f"DataSets in '{PIPELINE_NAME}' pipeline"
assert yaml_dump_mock.call_count == 1
assert yaml_dump_mock.call_args[0][0][key] == expected_dict[key]
def identity(data):
return data # pragma: no cover
@pytest.mark.usefixtures("chdir_to_dummy_project")
class TestCatalogCreateCommand:
PIPELINE_NAME = "de"
@staticmethod
@pytest.fixture(params=["base"])
def catalog_path(request, fake_repo_path):
catalog_path = fake_repo_path / "conf" / request.param / "catalog"
yield catalog_path
shutil.rmtree(catalog_path, ignore_errors=True)
def test_pipeline_argument_is_required(self, fake_project_cli):
result = CliRunner().invoke(fake_project_cli, ["catalog", "create"])
assert result.exit_code
expected_output = "Error: Missing option '--pipeline' / '-p'."
assert expected_output in result.output
@pytest.mark.usefixtures("fake_load_context")
def test_not_found_pipeline(self, fake_project_cli, fake_metadata, mock_pipelines):
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", "fake"],
obj=fake_metadata,
)
assert result.exit_code
existing_pipelines = ", ".join(sorted(mock_pipelines.keys()))
expected_output = (
f"Error: `fake` pipeline not found! Existing "
f"pipelines: {existing_pipelines}\n"
)
assert expected_output in result.output
def test_catalog_is_created_in_base_by_default(
self, fake_project_cli, fake_metadata, fake_repo_path, catalog_path
):
main_catalog_path = fake_repo_path / "conf" / "base" / "catalog.yml"
main_catalog_config = yaml.safe_load(main_catalog_path.read_text())
assert "example_iris_data" in main_catalog_config
data_catalog_file = catalog_path / f"{self.PIPELINE_NAME}.yml"
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", self.PIPELINE_NAME],
obj=fake_metadata,
)
assert not result.exit_code
assert data_catalog_file.is_file()
expected_catalog_config = {
"example_test_x": {"type": "MemoryDataSet"},
"example_test_y": {"type": "MemoryDataSet"},
"example_train_x": {"type": "MemoryDataSet"},
"example_train_y": {"type": "MemoryDataSet"},
}
catalog_config = yaml.safe_load(data_catalog_file.read_text())
assert catalog_config == expected_catalog_config
@pytest.mark.parametrize("catalog_path", ["local"], indirect=True)
def test_catalog_is_created_in_correct_env(
self, fake_project_cli, fake_metadata, catalog_path
):
data_catalog_file = catalog_path / f"{self.PIPELINE_NAME}.yml"
env = catalog_path.parent.name
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", self.PIPELINE_NAME, "--env", env],
obj=fake_metadata,
)
assert not result.exit_code
assert data_catalog_file.is_file()
def test_no_missing_datasets(
self,
fake_project_cli,
fake_metadata,
fake_load_context,
fake_repo_path,
mock_pipelines,
):
mocked_context = fake_load_context.return_value
catalog_data_sets = {
"input_data": CSVDataSet("test.csv"),
"output_data": CSVDataSet("test2.csv"),
}
mocked_context.catalog = DataCatalog(data_sets=catalog_data_sets)
mocked_context.project_path = fake_repo_path
mock_pipelines[self.PIPELINE_NAME] = Pipeline(
[node(identity, "input_data", "output_data")]
)
data_catalog_file = (
fake_repo_path / "conf" / "base" / "catalog" / f"{self.PIPELINE_NAME}.yml"
)
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", self.PIPELINE_NAME],
obj=fake_metadata,
)
assert not result.exit_code
assert not data_catalog_file.exists()
@pytest.mark.usefixtures("fake_repo_path")
def test_missing_datasets_appended(
self, fake_project_cli, fake_metadata, catalog_path
):
data_catalog_file = catalog_path / f"{self.PIPELINE_NAME}.yml"
assert not catalog_path.exists()
catalog_path.mkdir()
catalog_config = {
"example_test_x": {"type": "pandas.CSVDataSet", "filepath": "test.csv"}
}
with data_catalog_file.open(mode="w") as catalog_file:
yaml.safe_dump(catalog_config, catalog_file, default_flow_style=False)
result = CliRunner().invoke(
fake_project_cli,
["catalog", "create", "--pipeline", self.PIPELINE_NAME],
obj=fake_metadata,
)
assert not result.exit_code
expected_catalog_config = {
"example_test_x": catalog_config["example_test_x"],
"example_test_y": {"type": "MemoryDataSet"},
"example_train_x": {"type": "MemoryDataSet"},
"example_train_y": {"type": "MemoryDataSet"},
}
catalog_config = yaml.safe_load(data_catalog_file.read_text())
assert catalog_config == expected_catalog_config
def test_bad_env(self, fake_project_cli, fake_metadata):
"""Test error when provided conf environment does not exist"""
env = "no_such_env"
cmd = ["catalog", "list", "-e", env, "--pipeline", PIPELINE_NAME]
result = CliRunner().invoke(fake_project_cli, cmd, obj=fake_metadata)
assert result.exit_code
assert "Unable to instantiate Kedro session" in result.output
|
py
|
1a558e95cd46725ed3e69f5d0057e7cd778bb0b4
|
"""Dyson test configuration."""
from unittest.mock import patch
import pytest
from . import CREDENTIAL, HOST, SERIAL
from .mocked_mqtt import MockedMQTT
@pytest.fixture()
def mqtt_client(request: pytest.FixtureRequest) -> MockedMQTT:
"""Return mocked mqtt client."""
device_type = request.module.DEVICE_TYPE
status = request.module.STATUS
environmental_data = request.module.ENVIRONMENTAL_DATA
mocked_mqtt = MockedMQTT(
HOST,
SERIAL,
CREDENTIAL,
f"{device_type}/{SERIAL}/command",
f"{device_type}/{SERIAL}/status/current",
status,
environmental_data,
)
with patch("libdyson.dyson_device.mqtt.Client", mocked_mqtt.refersh), patch(
"libdyson.dyson_device.TIMEOUT", 0
):
yield mocked_mqtt
|
py
|
1a558eff14d1b8d8f34d2f540bbf983bd19d90d2
|
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
# Yields a list chunk of size n from list
def group(list, n):
for i in range(0, len(list), n):
yield list[i:i+n]
# Takes requested data and breaks it up into a number of pages,
# each containing n items
def pagebreak(request, data, n):
    paginator = Paginator(data, n)
page = request.GET.get('page')
items = paginator.get_page(page)
# Generate page numbers to provide a link to from current page
page_span = 2
last = paginator.num_pages
index = items.number
prev = (index - page_span) if index > page_span else 1
next = (index + page_span) if index < (last - page_span) else last
page_range = paginator.page_range[prev:next]
return items, page_range
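# --- Hedged usage sketch (not part of the original module) ---
# ``group`` is a plain generator and can be exercised without Django:
#
#   list(group([1, 2, 3, 4, 5], 2))  ->  [[1, 2], [3, 4], [5]]
#
# ``pagebreak`` additionally needs a request carrying a ``page`` query
# parameter, e.g. inside a view (the queryset name below is hypothetical):
#
#   items, page_range = pagebreak(request, Article.objects.all(), 10)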
|
py
|
1a559031b92445683fb2168e6c1c6197fb556cb6
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from collections import OrderedDict
class LinearFeatureBaseline(nn.Module):
"""Linear baseline based on handcrafted features, as described in [1]
(Supplementary Material 2).
[1] Yan Duan, Xi Chen, Rein Houthooft, John Schulman, Pieter Abbeel,
"Benchmarking Deep Reinforcement Learning for Continuous Control", 2016
(https://arxiv.org/abs/1604.06778)
"""
def __init__(self, input_size, reg_coeff=1e-5):
super(LinearFeatureBaseline, self).__init__()
self.input_size = input_size
self._reg_coeff = reg_coeff
self.weight = nn.Parameter(torch.Tensor(self.feature_size,),
requires_grad=False)
self.weight.data.zero_()
self._eye = torch.eye(self.feature_size,
dtype=torch.float32,
device=self.weight.device)
@property
def feature_size(self):
return 2 * self.input_size + 4
def _feature(self, episodes):
ones = episodes.mask.unsqueeze(2)
observations = episodes.observations
time_step = torch.arange(len(episodes)).view(-1, 1, 1) * ones / 100.0
return torch.cat([
observations,
observations ** 2,
time_step,
time_step ** 2,
time_step ** 3,
ones
], dim=2)
def fit(self, episodes):
# sequence_length * batch_size x feature_size
featmat = self._feature(episodes).view(-1, self.feature_size)
# sequence_length * batch_size x 1
returns = episodes.returns.view(-1, 1)
# Remove blank (all-zero) episodes that only exist because episode lengths vary
flat_mask = episodes.mask.flatten()
flat_mask_nnz = torch.nonzero(flat_mask, as_tuple=False)
featmat = featmat[flat_mask_nnz].view(-1, self.feature_size)
returns = returns[flat_mask_nnz].view(-1, 1)
reg_coeff = self._reg_coeff
XT_y = torch.matmul(featmat.t(), returns)
XT_X = torch.matmul(featmat.t(), featmat)
for _ in range(5):
try:
coeffs, _ = torch.lstsq(XT_y, XT_X + reg_coeff * self._eye)
                # If the least-squares solution contains inf or nan values,
                # retry with a larger regularization coefficient
if torch.isnan(coeffs).any() or torch.isinf(coeffs).any():
raise RuntimeError
break
except RuntimeError:
reg_coeff *= 10
else:
raise RuntimeError('Unable to solve the normal equations in '
'`LinearFeatureBaseline`. The matrix X^T*X (with X the design '
'matrix) is not full-rank, regardless of the regularization '
'(maximum regularization: {0}).'.format(reg_coeff))
self.weight.copy_(coeffs.flatten())
def forward(self, episodes):
features = self._feature(episodes)
values = torch.mv(features.view(-1, self.feature_size), self.weight)
return values.view(features.shape[:2])
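# --- Hedged usage sketch (not part of the original module) ---
# The features concatenate the observations, their squares, three powers of
# the time step and a bias column, so feature_size is 2 * input_size + 4:
def _example_feature_size(input_size=4):
    baseline = LinearFeatureBaseline(input_size=input_size)
    return baseline.feature_size  # 2 * 4 + 4 == 12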
|
py
|
1a559084b3dc46239ef02ae1fed352f69c77d406
|
import random
from collections.abc import Iterable
from dynaconf import settings
def lang_raw(lang_code, *path):
package = settings.LANG[lang_code]
for p in path:
package = package[p]
return package
def lang(lang_code, *path):
package = settings.LANG[lang_code]
for p in path:
package = package[p]
if isinstance(package, Iterable) and not isinstance(package, str):
return random.choice(list(package))
else:
return package
def limitation(name):
return settings.LIMITATIONS[name]
|
py
|
1a5590d842116e6e3b7cbbaeeb65a77fd99cd60a
|
from gui.blueprints.blueprint import Blueprint
from utils.string_utils import StringUtils
from blueprints.attribute_blueprint import AttributeBlueprint as AB
from utils import logger_utils
import pygame as pg
from utils.gui_utils import Themes
class AttributeBlueprint(Blueprint):
SIZE = [.2, .1]
DATA_TYPE = {
"none": "ID_NONE",
"int": "ID_INTEGER",
"string": "ID_STRING",
"char": "ID_CHARACTER",
"float": "ID_FLOAT"
}
def __init__(self, panel):
Blueprint.__init__(self, panel, AB())
self.__logger = logger_utils.get_logger(__name__)
self.set_custom_size(AttributeBlueprint.SIZE)
self.data_type_pressed = [False, None] # IS PRESSED; TEXT BOX
self.data_type_selection = list()
self.change_font(pg.font.Font(Themes.DEFAULT_THEME.get("text_font_style"), int(self.get_rect().height * .23)))
def reset_selection(self):
super().reset_selection()
self.data_type_pressed = [False, None]
self.data_type_selection = list()
def initialize(self, coords, size, blueprint, panel):
super().initialize(coords, size, blueprint, panel)
self.change_font(pg.font.Font(Themes.DEFAULT_THEME.get("text_font_style"), int(self.get_rect().height * .23)))
# TODO add additional data
def get_data(self):
data = super().get_data()
data[1] = StringUtils.get_string("ID_ATTRIBUTE")
data[2] = StringUtils.get_string(AttributeBlueprint.DATA_TYPE.get(self.get_blueprint().get_data_type()))
data[3] = self.get_blueprint().get_value()
return data
def set_data(self, index, data):
if index == 2:
for key, value in AttributeBlueprint.DATA_TYPE.items():
if data == StringUtils.get_string(value):
self.get_blueprint().set_data_type(key)
elif index == 3:
self.get_blueprint().set_value(data)
super().set_data(index, data)
self.update_displayed_data(self.font.render(self.get_blueprint().name,
True, Themes.DEFAULT_THEME.get("font")))
def update_displayed_data(self, text):
super().update_displayed_data(text)
|
py
|
1a5590fad5775a23d01473f119cb1e3b91f64703
|
""" Code for `daugman_visual_explanation.ipynb`
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
import itertools
import random
from daugman import daugman
from daugman import find_iris
from typing import List, Tuple, Iterable
class DaugmanVisualExplanation:
def __init__(self, img_path: str, start_r=10, end_r=30, circle_step=2, points_step=3):
self.img = self._get_new_image(img_path)
self.start_r = start_r
self.end_r = end_r
self.circle_step = circle_step
self.points_step = points_step
self.all_points = self._get_all_potential_iris_centers(self.img)
self.colors = self._get_unique_color_for_each_point(self.all_points)
def _get_new_image(self, img_path, gray=False) -> np.ndarray:
""" Get properly cropped BGR image, which looks like grayscale
"""
img = cv2.imread(img_path)
img = img[20:130, 20:130]
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if not gray:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return img
def _get_all_potential_iris_centers(self, img: np.ndarray) -> List[Tuple[int, int]]:
# get all potential points for search (from `find_iris()`)
h = img.shape[0]
# we will look only on dots within central 1/3 of image
single_axis_range = range(int(h / 3), h - int(h / 3), self.points_step)
all_points = list(itertools.product(single_axis_range, single_axis_range))
return all_points
def _get_unique_color_for_each_point(self, all_points: List[Tuple[int, int]]) -> List[Tuple[int, int, int]]:
colors = [(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) for i in self.all_points]
return colors
def plot_all_potential_iris_centers(self) -> np.ndarray:
# plot all potential points
img_dot = self.img.copy()
for point, color in zip(self.all_points, self.colors):
cv2.circle(img_dot, point, 0, color, -1)
_ = plt.imshow(img_dot[::, ::, ::-1])
return img_dot
def plot_circles_for_one_center(self, img_dot: np.ndarray, dot_idx=0) -> np.ndarray:
img_circles = img_dot.copy()
# within circles in radii range from 10px to 1/4 of image side
# plot the chosen potential point
cv2.circle(img_circles, list(self.all_points)[dot_idx], 0, self.colors[dot_idx], 1)
# plot all circle candidates for the single potential point
img_circles = self._draw_circles(img_circles, self.all_points[dot_idx], self.colors[dot_idx],
start_r=self.start_r, end_r=self.end_r, step=self.circle_step)
_ = plt.imshow(img_circles[::, ::, ::-1])
return img_circles
def _draw_circles(self, img: np.ndarray,
center: Tuple[int, int], color: Tuple[int, int, int],
start_r: int, end_r: int, step: int,
alpha=0.5) -> np.ndarray:
""" Part of ``daugman()`` modified for presentation purposes
"""
# get separate coordinates
x, y = center
overlay = img.copy()
radii = list(range(start_r, end_r, step))
for r in radii:
cv2.circle(overlay, center, r, color, 1)
img = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
return img
def plot_best_circle_for_single_potential_iris_center(self, img_dot: np.ndarray,
dot_idx: int, color=None, alpha=0.8) -> np.ndarray:
gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
# get best circle
_, best_radius = daugman(gray_img, self.all_points[dot_idx],
self.start_r, self.end_r, self.circle_step)
# plot best circle
if not color:
color = self.colors[dot_idx]
overlay = img_dot.copy()
cv2.circle(overlay, self.all_points[dot_idx], best_radius, color, 1)
img_dot = cv2.addWeighted(overlay, alpha, img_dot, 1 - alpha, 0)
return img_dot
def plot_best_circle_for_a_few_potential_iris_centers(self, img_dot: np.ndarray,
idxs: Iterable[int]) -> np.ndarray:
img = img_dot.copy()
for idx in idxs:
img = self.plot_best_circle_for_single_potential_iris_center(img, idx)
_ = plt.imshow(img[::, ::, ::-1])
return img_dot
def find_iris(self, *, daugman_start, daugman_end, daugman_step, points_step) -> np.ndarray:
gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
answer = find_iris(gray_img, daugman_start=daugman_start, daugman_end=daugman_end,
daugman_step=daugman_step, points_step=points_step)
iris_center, iris_rad = answer
out = self.img.copy()
cv2.circle(out, iris_center, iris_rad, (0, 0, 255), 1)
_ = plt.imshow(out[::, ::, ::-1])
return out
def plot_pixel_intensity_delta_pic(self) -> None:
# white image
img = np.full([100, 100, 3], 255, dtype=np.uint8)
# black circle
img = cv2.circle(img, (50, 50), 20, [0, 0, 0], -1)
# yellow
img = cv2.circle(img, (50, 50), 10, [255, 255, 0], 1)
# green
img = cv2.circle(img, (50, 50), 15, [0, 255, 0], 1)
# red
img = cv2.circle(img, (50, 50), 20, [255, 0, 0], 1)
# blue
img = cv2.circle(img, (50, 50), 25, [0, 0, 255], 1)
_ = plt.imshow(img)
def find_iris_on_binary_image(self, *, daugman_start, daugman_end, daugman_step, points_step) -> None:
# create simple image
img = np.full([100, 100, 3], 255, dtype=np.uint8)
img = cv2.circle(img, (50, 50), 20, [0, 0, 0], -1)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
answer = find_iris(gray_img, daugman_start=daugman_start, daugman_end=daugman_end,
daugman_step=daugman_step, points_step=points_step)
iris_center, iris_rad = answer
cv2.circle(img, iris_center, iris_rad, (0, 0, 255), 1)
_ = plt.imshow(img[::, ::, ::-1])
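# --- Hedged usage sketch (not part of the original module) ---
# Rough order in which the notebook exercises this helper class. "eye.jpg"
# is a hypothetical image path; the radii and steps simply mirror the
# defaults declared in __init__.
#
#   explainer = DaugmanVisualExplanation("eye.jpg")
#   img_dot = explainer.plot_all_potential_iris_centers()
#   explainer.plot_circles_for_one_center(img_dot, dot_idx=0)
#   explainer.find_iris(daugman_start=10, daugman_end=30,
#                       daugman_step=2, points_step=3)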
|
py
|
1a55920d86311a6a10a70d935a4521b2bba7ae0d
|
import math
import torch.nn as nn
class VGG(nn.Module):
'''
VGG model
'''
def __init__(self, features):
super(VGG, self).__init__()
self.features = features
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 10),
)
# Initialize weights
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
def vgg11():
"""VGG 11-layer model (configuration "A")"""
return VGG(make_layers(cfg['A']))
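# --- Hedged usage sketch (not part of the original module) ---
# The classifier's first Linear layer expects 512 flattened features, which
# matches a 32x32 input (e.g. CIFAR-10) after the five max-pool stages.
def _example_vgg11_forward():
    import torch  # only torch.nn is imported above; the top-level package is needed here
    model = vgg11()
    logits = model(torch.randn(1, 3, 32, 32))
    return logits.shape  # torch.Size([1, 10])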
|
py
|
1a5592d41716c020ed7917523a8c5a280355fa53
|
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for interacting with the type of a computation."""
from tensorflow_federated.python.core.impl.types.computation_types import *
from tensorflow_federated.python.core.impl.types.type_analysis import contains
from tensorflow_federated.python.core.impl.types.type_analysis import count
from tensorflow_federated.python.core.impl.types.type_analysis import is_structure_of_floats
from tensorflow_federated.python.core.impl.types.type_analysis import is_structure_of_integers
from tensorflow_federated.python.core.impl.types.type_analysis import is_structure_of_tensors
from tensorflow_federated.python.core.impl.types.type_analysis import is_tensorflow_compatible_type
from tensorflow_federated.python.core.impl.types.type_conversions import type_from_tensors
from tensorflow_federated.python.core.impl.types.type_conversions import type_to_py_container
from tensorflow_federated.python.core.impl.types.type_conversions import type_to_tf_tensor_specs
from tensorflow_federated.python.core.impl.types.type_serialization import deserialize_type
from tensorflow_federated.python.core.impl.types.type_serialization import serialize_type
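# --- Hedged usage sketch (not part of the original module) ---
# One way the re-exported helpers above can be combined; exact behaviour
# depends on the installed TensorFlow Federated version, so treat this as
# an illustration only.
#
#   import tensorflow as tf
#   spec = TensorType(tf.int32, [3])   # re-exported from computation_types
#   is_structure_of_integers(spec)     # -> True
#   serialize_type(spec)               # protobuf representation of the type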
|
py
|
1a5593905433dfd2c66949a872afb662bd707f7f
|
"""Very simple breakout clone. A circle shape serves as the paddle, then
breakable bricks constructed of Poly-shapes.
The code showcases several pymunk concepts such as elasticity, impulses,
constant object speed, joints, collision handlers and post step callbacks.
"""
import math, sys, random
import os
import pygame
from pygame.locals import *
from pygame.color import *
import pymunk
from pymunk import Vec2d
import pymunk.pygame_util
width, height = 600,600
collision_types = {
"ball": 1,
"brick": 2,
"bottom": 3,
"player": 4,
}
def spawn_ball(space, position, direction):
ball_body = pymunk.Body(1, pymunk.inf)
ball_body.position = position
ball_shape = pymunk.Circle(ball_body, 5)
ball_shape.color = THECOLORS["green"]
ball_shape.elasticity = 1.0
ball_shape.collision_type = collision_types["ball"]
ball_body.apply_impulse_at_local_point(Vec2d(direction))
# Keep ball velocity at a static value
def constant_velocity(body, gravity, damping, dt):
body.velocity = body.velocity.normalized() * 400
ball_body.velocity_func = constant_velocity
space.add(ball_body, ball_shape)
def setup_level(space, player_body):
# Remove balls and bricks
for s in space.shapes[:]:
if s.body.body_type == pymunk.Body.DYNAMIC and s.body not in [player_body]:
space.remove(s.body, s)
# Spawn a ball for the player to have something to play with
spawn_ball(space, player_body.position + (0,40), random.choice([(1,10),(-1,10)]))
# Spawn bricks
for x in range(0,21):
x = x * 20 + 100
for y in range(0,5):
y = y * 10 + 400
brick_body = pymunk.Body(body_type=pymunk.Body.KINEMATIC)
brick_body.position = x, y
brick_shape = pymunk.Poly.create_box(brick_body, (20,10))
brick_shape.elasticity = 1.0
brick_shape.color = THECOLORS['blue']
brick_shape.group = 1
brick_shape.collision_type = collision_types["brick"]
space.add(brick_body, brick_shape)
# Make bricks be removed when hit by ball
def remove_brick(arbiter, space, data):
brick_shape = arbiter.shapes[0]
space.remove(brick_shape, brick_shape.body)
h = space.add_collision_handler(
collision_types["brick"],
collision_types["ball"])
h.separate = remove_brick
def main():
### PyGame init
pygame.init()
screen = pygame.display.set_mode((width,height))
clock = pygame.time.Clock()
running = True
font = pygame.font.SysFont("Arial", 16)
### Physics stuff
space = pymunk.Space()
draw_options = pymunk.pygame_util.DrawOptions(screen)
### Game area
# walls - the left-top-right walls
static_lines = [pymunk.Segment(space.static_body, (50, 50), (50, 550), 2)
,pymunk.Segment(space.static_body, (50, 550), (550, 550), 2)
,pymunk.Segment(space.static_body, (550, 550), (550, 50), 2)
]
for line in static_lines:
line.color = THECOLORS['lightgray']
line.elasticity = 1.0
space.add(static_lines)
# bottom - a sensor that removes anything touching it
bottom = pymunk.Segment(space.static_body, (50, 50), (550, 50), 2)
bottom.sensor = True
bottom.collision_type = collision_types["bottom"]
bottom.color = THECOLORS['red']
def remove_first(arbiter, space, data):
ball_shape = arbiter.shapes[0]
space.remove(ball_shape, ball_shape.body)
return True
h = space.add_collision_handler(
collision_types["ball"],
collision_types["bottom"])
h.begin = remove_first
space.add(bottom)
### Player ship
player_body = pymunk.Body(500, pymunk.inf)
player_body.position = 300,100
player_shape = pymunk.Segment(player_body, (-50,0), (50,0), 8)
player_shape.color = THECOLORS["red"]
player_shape.elasticity = 1.0
player_shape.collision_type = collision_types["player"]
def pre_solve(arbiter, space, data):
# We want to update the collision normal to make the bounce direction
# dependent of where on the paddle the ball hits. Note that this
# calculation isn't perfect, but just a quick example.
set_ = arbiter.contact_point_set
if len(set_.points) > 0:
player_shape = arbiter.shapes[0]
width = (player_shape.b - player_shape.a).x
delta = (player_shape.body.position - set_.points[0].point_a.x).x
normal = Vec2d(0, 1).rotated(delta / width / 2)
set_.normal = normal
set_.points[0].distance = 0
arbiter.contact_point_set = set_
return True
h = space.add_collision_handler(
collision_types["player"],
collision_types["ball"])
h.pre_solve = pre_solve
    # restrict movement of player to a straight line
move_joint = pymunk.GrooveJoint(space.static_body, player_body, (100,100), (500,100), (0,0))
space.add(player_body, player_shape, move_joint)
global state
# Start game
setup_level(space, player_body)
while running:
for event in pygame.event.get():
if event.type == QUIT:
running = False
elif event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):
running = False
elif event.type == KEYDOWN and event.key == K_p:
pygame.image.save(screen, "breakout.png")
elif event.type == KEYDOWN and event.key == K_LEFT:
player_body.velocity = (-600,0)
elif event.type == KEYUP and event.key == K_LEFT:
player_body.velocity = 0,0
elif event.type == KEYDOWN and event.key == K_RIGHT:
player_body.velocity = (600,0)
elif event.type == KEYUP and event.key == K_RIGHT:
player_body.velocity = 0,0
elif event.type == KEYDOWN and event.key == K_r:
setup_level(space, player_body)
elif event.type == KEYDOWN and event.key == K_SPACE:
spawn_ball(space, player_body.position + (0,40), random.choice([(1,10),(-1,10)]))
### Clear screen
screen.fill(THECOLORS["black"])
### Draw stuff
space.debug_draw(draw_options)
state = []
for x in space.shapes:
s = "%s %s %s" % (x, x.body.position, x.body.velocity)
state.append(s)
### Update physics
fps = 60
dt = 1./fps
space.step(dt)
### Info and flip screen
screen.blit(font.render("fps: " + str(clock.get_fps()), 1, THECOLORS["white"]), (0,0))
screen.blit(font.render("Move with left/right arrows, space to spawn a ball", 1, THECOLORS["darkgrey"]), (5,height - 35))
screen.blit(font.render("Press R to reset, ESC or Q to quit", 1, THECOLORS["darkgrey"]), (5,height - 20))
pygame.display.flip()
clock.tick(fps)
if __name__ == '__main__':
sys.exit(main())
|
py
|
1a5593dc2cbe7858b0bdab2b98618691f453cdca
|
"""
Module for Optuna hyperparameter optimization (optuna.org)
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import open
from builtins import str
from future import standard_library
standard_library.install_aliases()
# required to make json saving work in Python 2/3
try:
to_unicode = unicode
except NameError:
to_unicode = str
import imp
import json
import logging
import datetime
import os
import signal
import glob
from copy import copy
from random import Random
from time import sleep, time
from itertools import product
from subprocess import Popen, PIPE
import importlib, types
import numpy.random as nr
import numpy as np
import pickle
from neuron import h
from netpyne import sim, specs
import optuna
from .utils import createFolder
from .utils import bashTemplate
from .utils import dcp, sigfig
pc = h.ParallelContext() # use bulletin board master/slave
# -------------------------------------------------------------------------------
# Optuna optimization
# -------------------------------------------------------------------------------
# func needs to be outside of class
def runJob(nrnCommand, script, cfgSavePath, netParamsSavePath, simDataPath):
"""
Function for/to <short description of `netpyne.batch.optuna_parallel.runJob`>
Parameters
----------
script : <type>
<Short description of script>
**Default:** *required*
cfgSavePath : <type>
<Short description of cfgSavePath>
**Default:** *required*
netParamsSavePath : <type>
<Short description of netParamsSavePath>
**Default:** *required*
simDataPath : <type>
<Short description of simDataPath>
**Default:** *required*
"""
import os
print('\nJob in rank id: ',pc.id())
command = '%s %s simConfig=%s netParams=%s' % (nrnCommand, script, cfgSavePath, netParamsSavePath)
print(command)
with open(simDataPath+'.run', 'w') as outf, open(simDataPath+'.err', 'w') as errf:
pid = Popen(command.split(' '), stdout=outf, stderr=errf, preexec_fn=os.setsid).pid
with open('./pids.pid', 'a') as file:
file.write(str(pid) + ' ')
def optunaOptim(self, pc):
"""
Function for/to <short description of `netpyne.batch.optuna_parallel.optunaOptim`>
Parameters
----------
self : <type>
<Short description of self>
**Default:** *required*
pc : <type>
<Short description of pc>
**Default:** *required*
"""
import sys
# -------------------------------------------------------------------------------
# Optuna optimization: Parallel evaluation
# -------------------------------------------------------------------------------
def objective(trial, args):
import os
ngen = trial.number
total_jobs = 0
# options slurm, mpi
type = args.get('type', 'mpi_direct')
# params
paramLabels = args.get('paramLabels', [])
minVals = args.get('minVals', [])
maxVals = args.get('maxVals', [])
# paths to required scripts
script = args.get('script', 'init.py')
netParamsSavePath = args.get('netParamsSavePath')
genFolderPath = self.saveFolder + '/trial_' + str(ngen)
# mpi command setup
nodes = args.get('nodes', 1)
coresPerNode = args.get('coresPerNode', 1)
mpiCommand = args.get('mpiCommand', 'mpiexec')
nrnCommand = args.get('nrnCommand', 'nrniv -python -mpi')
numproc = nodes*coresPerNode
# slurm setup
custom = args.get('custom', '')
folder = args.get('folder', '.')
email = args.get('email', '[email protected]')
walltime = args.get('walltime', '00:01:00')
reservation = args.get('reservation', None)
allocation = args.get('allocation', 'csd403') # NSG account
# fitness function
fitnessFunc = args.get('fitnessFunc')
fitnessFuncArgs = args.get('fitnessFuncArgs')
maxFitness = args.get('maxFitness')
# read params or set defaults
sleepInterval = args.get('sleepInterval', 0.2)
# create folder if it does not exist
createFolder(genFolderPath)
# --------------------------------------
# generate param values for optuna trial
candidate = []
for paramLabel, minVal, maxVal in zip(paramLabels, minVals, maxVals):
candidate.append(trial.suggest_uniform(str(paramLabel), minVal, maxVal))
# remember pids and jobids in a list
pids = []
jobids = {}
# create a job for the candidate
candidate_index = 0
sleep(sleepInterval) # required for slurm
# name and path
jobName = "trial_" + str(ngen)
jobPath = genFolderPath + '/' + jobName
# set initial cfg initCfg
if len(self.initCfg) > 0:
for paramLabel, paramVal in self.initCfg.items():
self.setCfgNestedParam(paramLabel, paramVal)
# modify cfg instance with candidate values
#print(paramLabels, candidate)
for label, value in zip(paramLabels, candidate):
print('set %s=%s' % (label, value))
self.setCfgNestedParam(label, value)
#self.setCfgNestedParam("filename", jobPath)
self.cfg.simLabel = jobName
self.cfg.saveFolder = genFolderPath
# save cfg instance to file
cfgSavePath = jobPath + '_cfg.json'
self.cfg.save(cfgSavePath)
if type=='mpi_bulletin':
# ----------------------------------------------------------------------
# MPI master-slaves
# ----------------------------------------------------------------------
pc.submit(runJob, nrnCommand, script, cfgSavePath, netParamsSavePath, jobPath)
print('-'*80)
else:
# ----------------------------------------------------------------------
            # MPI job command
# ----------------------------------------------------------------------
if mpiCommand == '':
command = '%s %s simConfig=%s netParams=%s ' % (nrnCommand, script, cfgSavePath, netParamsSavePath)
else:
command = '%s -n %d %s %s simConfig=%s netParams=%s ' % (mpiCommand, numproc, nrnCommand, script, cfgSavePath, netParamsSavePath)
# ----------------------------------------------------------------------
# run on local machine with <nodes*coresPerNode> cores
# ----------------------------------------------------------------------
if type=='mpi_direct':
executer = '/bin/bash'
jobString = bashTemplate('mpi_direct') %(custom, folder, command)
# ----------------------------------------------------------------------
# run on HPC through slurm
# ----------------------------------------------------------------------
elif type=='hpc_slurm':
executer = 'sbatch'
res = '#SBATCH --res=%s' % (reservation) if reservation else ''
jobString = bashTemplate('hpc_slurm') % (jobName, allocation, walltime, nodes, coresPerNode, jobPath, jobPath, email, res, custom, folder, command)
# ----------------------------------------------------------------------
# run on HPC through PBS
# ----------------------------------------------------------------------
elif type=='hpc_torque':
executer = 'qsub'
queueName = args.get('queueName', 'default')
nodesppn = 'nodes=%d:ppn=%d' % (nodes, coresPerNode)
jobString = bashTemplate('hpc_torque') % (jobName, walltime, queueName, nodesppn, jobPath, jobPath, custom, command)
# ----------------------------------------------------------------------
# save job and run
# ----------------------------------------------------------------------
print('Submitting job ', jobName)
print(jobString)
print('-'*80)
# save file
batchfile = '%s.sbatch' % (jobPath)
with open(batchfile, 'w') as text_file:
text_file.write("%s" % jobString)
if type == 'mpi_direct':
with open(jobPath+'.run', 'a+') as outf, open(jobPath+'.err', 'w') as errf:
pids.append(Popen([executer, batchfile], stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
else:
with open(jobPath+'.jobid', 'w') as outf, open(jobPath+'.err', 'w') as errf:
pids.append(Popen([executer, batchfile], stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
#proc = Popen(command.split([executer, batchfile]), stdout=PIPE, stderr=PIPE)
sleep(0.1)
#read = proc.stdout.read()
if type == 'mpi_direct':
with open('./pids.pid', 'a') as file:
file.write(str(pids))
else:
with open(jobPath+'.jobid', 'r') as outf:
read=outf.readline()
print(read)
if len(read) > 0:
jobid = int(read.split()[-1])
jobids[candidate_index] = jobid
print('jobids', jobids)
total_jobs += 1
sleep(0.1)
# ----------------------------------------------------------------------
# gather data and compute fitness
# ----------------------------------------------------------------------
if type == 'mpi_bulletin':
# wait for pc bulletin board jobs to finish
try:
while pc.working():
sleep(1)
#pc.done()
except:
pass
num_iters = 0
jobs_completed = 0
fitness = [None] # just 1 candidate
# print outfilestem
print("Waiting for jobs from generation %d/%d ..." %(ngen, args.get('maxiters')))
# print "PID's: %r" %(pids)
# start fitness calculation
while jobs_completed < total_jobs:
unfinished = [i for i, x in enumerate(fitness) if x is None ]
for candidate_index in unfinished:
try: # load simData and evaluate fitness
jobNamePath = genFolderPath + "/trial_" + str(ngen)
if os.path.isfile(jobNamePath+'.json'):
with open('%s.json'% (jobNamePath)) as file:
simData = json.load(file)['simData']
fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
jobs_completed += 1
print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
elif os.path.isfile(jobNamePath+'.pkl'):
with open('%s.pkl'% (jobNamePath), 'rb') as file:
simData = pickle.load(file)['simData']
fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
jobs_completed += 1
print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
except Exception as e:
err = "There was an exception evaluating candidate %d:"%(candidate_index)
print(("%s \n %s"%(err,e)))
num_iters += 1
print('completed: %d' %(jobs_completed))
if num_iters >= args.get('maxiter_wait', 5000):
print("Max iterations reached, the %d unfinished jobs will be canceled and set to default fitness" % (len(unfinished)))
                for candidate_index in unfinished:
                    fitness[candidate_index] = maxFitness  # unfinished candidates are assigned the default (max) fitness
                    jobs_completed += 1
                    try:
                        if 'scancelUser' in args:
                            os.system('scancel -u %s' % (args['scancelUser']))
                        else:
                            os.system('scancel %d' % (jobids[candidate_index]))  # terminate unfinished job (resubmitted jobs not terminated!)
except:
pass
sleep(args.get('time_sleep', 1))
# kill all processes
if type == 'mpi_bulletin':
try:
with open("./pids.pid", 'r') as file: # read pids for mpi_bulletin
pids = [int(i) for i in file.read().split(' ')[:-1]]
with open("./pids.pid", 'w') as file: # delete content
pass
for pid in pids:
try:
os.killpg(os.getpgid(pid), signal.SIGTERM)
except:
pass
except:
pass
elif type == 'mpi_direct':
import psutil
PROCNAME = "nrniv"
for proc in psutil.process_iter():
# check whether the process name matches
try:
if proc.name() == PROCNAME:
proc.kill()
except:
pass
            # don't want to do this for HPCs since jobs are running on compute nodes, not the master
print("-" * 80)
print(" Completed a generation ")
print("-" * 80)
return fitness[0] # single candidate for now
# -------------------------------------------------------------------------------
# Optuna optimization: Main code
# -------------------------------------------------------------------------------
import os
from time import sleep
try:
from mpi4py import MPI
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
except:
size = 1
rank = 0
# create main sim directory and save scripts
self.saveScripts()
global ngen
ngen = -1
# gather **kwargs
args = {}
args['popsize'] = self.optimCfg.get('popsize', 1)
args['minVals'] = [x['values'][0] for x in self.params]
args['maxVals'] = [x['values'][1] for x in self.params]
args['cfg'] = self.cfg # include here args/params to pass to evaluator function
args['paramLabels'] = [x['label'] for x in self.params]
args['netParamsSavePath'] = self.saveFolder + '/' + self.batchLabel + '_netParams.py'
args['maxiters'] = self.optimCfg['maxiters'] if 'maxiters' in self.optimCfg else 1000
args['maxtime'] = self.optimCfg['maxtime'] if 'maxtime' in self.optimCfg else None
args['fitnessFunc'] = self.optimCfg['fitnessFunc']
args['fitnessFuncArgs'] = self.optimCfg['fitnessFuncArgs']
args['maxiter_wait'] = self.optimCfg['maxiter_wait']
args['time_sleep'] = self.optimCfg['time_sleep']
args['maxFitness'] = self.optimCfg.get('maxFitness', 1000)
args['direction'] = self.optimCfg['direction'] if 'direction' in self.optimCfg else 'minimize'
for key, value in self.optimCfg.items():
args[key] = value
for key, value in self.runCfg.items():
args[key] = value
# if using pc bulletin board, initialize all workers
if self.runCfg.get('type', None) == 'mpi_bulletin':
for iworker in range(int(pc.nhost())):
pc.runworker()
# -------------------------------------------------------------------------------
# Run algorithm
# -------------------------------------------------------------------------------
    sleep(rank) # each process waits a different time to avoid saturating the sqlite database
study = optuna.create_study(study_name=self.batchLabel, storage='sqlite:///%s/%s_storage.db' % (self.saveFolder, self.batchLabel),
load_if_exists=True, direction=args['direction'])
try:
study.optimize(lambda trial: objective(trial, args), n_trials=args['maxiters'], timeout=args['maxtime'])
except Exception as e:
print(e)
# print best and finish
if rank == size-1:
df = study.trials_dataframe(attrs=('number', 'value', 'params', 'state'))
importance = optuna.importance.get_param_importances(study=study)
print('\nBest trial: ', study.best_trial)
print('\nParameter importance: ', dict(importance))
print('\nBest Solution with fitness = %.4g: \n' % (study.best_value), study.best_params)
print('\nSaving to output.pkl...\n')
output = {'study': study, 'df': df, 'importance': importance}
with open('%s/%s_output.pkl' % (self.saveFolder, self.batchLabel), 'wb') as f:
pickle.dump(output, f)
sleep(1)
print("-" * 80)
print(" Completed Optuna parameter optimization ")
print("-" * 80)
sys.exit()
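# -------------------------------------------------------------------------------
# Illustrative usage (a commented sketch, not executed here). The optimCfg/runCfg
# keys mirror what objective() reads above; the Batch construction itself is an
# assumption about how this module is typically driven from a user batch script.
# -------------------------------------------------------------------------------
# from netpyne.batch import Batch
#
# def fitnessFunc(simData, **kwargs):
#     # toy fitness: distance of a simulated population rate from a target rate
#     # (the 'popRates' key and 'E' population name are examples, not requirements)
#     return abs(simData['popRates']['E'] - kwargs['targetRate'])
#
# b = Batch(cfgFile='cfg.py', netParamsFile='netParams.py')
# b.method = 'optuna'
# b.optimCfg = {'fitnessFunc': fitnessFunc, 'fitnessFuncArgs': {'targetRate': 10.0},
#               'maxFitness': 1000, 'maxiters': 20, 'maxtime': 3600,
#               'maxiter_wait': 40, 'time_sleep': 5, 'popsize': 1}
# b.runCfg = {'type': 'mpi_direct', 'script': 'init.py',
#             'mpiCommand': 'mpiexec', 'nodes': 1, 'coresPerNode': 4}
# b.run()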
|
py
|
1a5594ed594c956000fa19a073be97fdc9823df4
|
# Standard library imports
import sqlite3
from dataclasses import asdict
# Third party imports
import pandas as pd
from spotify_flows.spotify.artists import read_artists_from_id
from spotify_flows.database import SpotifyDatabase
# Main body
def main():
db = SpotifyDatabase("data/spotify.db", op_table="operations")
df_related, df_artists = db.table_contents(["related", "artists"])
df_related = df_related.drop_duplicates()
enriched_artist_ids = df_artists.loc[:, "id"].unique().tolist()
all_artist_ids = set(
df_related["artist_id"].tolist() + df_related["related_artist_id"].tolist()
)
artists_to_enrich = [id for id in all_artist_ids if id not in enriched_artist_ids]
remaining_artists = artists_to_enrich
while remaining_artists:
n = min(len(remaining_artists), 50)
artists = read_artists_from_id(artist_ids=remaining_artists[:n])
df_data = pd.DataFrame([asdict(artist) for artist in artists]).drop(
columns=["genres"]
)
db.enrich_database_table(df_data=df_data, table="artists")
remaining_artists = remaining_artists[n:]
if __name__ == "__main__":
raise SystemExit(main())
|
py
|
1a559519e298ca9aaae5f933db2803c42b8e7a3b
|
import glob
import os
import pytest
from cli.src.helpers.build_io import get_build_path
from cli.src.helpers.data_loader import load_schema_obj, load_all_schema_objs, load_all_schema_objs_from_directory,\
load_template_file, load_json_obj, types, SCHEMA_DIR
from tests.unit.helpers.constants import CLUSTER_NAME_LOAD, NON_EXISTING_CLUSTER, TEST_DOCS, OUTPUT_PATH, TEST_INVENTORY, TEST_JSON,\
TEST_JSON_NAME, TEST_CLUSTER_MODEL
TEST_MINIMAL_CLUSTER_CONFIG = {
'kind': 'epiphany-cluster',
'title': 'Epiphany cluster Config',
'provider': 'aws',
'name': 'default',
'specification':
{
'name': 'name',
'prefix': 'prefix',
'admin_user':
{
'name': 'ubuntu',
'key_path': '/shared/.ssh/epiphany-operations/id_rsa'
},
'cloud':
{
'k8s_as_cloud_service': False,
'use_public_ips': False,
'credentials':
{
'key': 'XXXX-XXXX-XXXX',
'secret': 'XXXXXXXXXXXXXXXX'
},
'default_os_image': 'default'
},
'components':
{
'repository': {'count': 1},
'kubernetes_master': {'count': 1},
'kubernetes_node': {'count': 2},
'logging': {'count': 1},
'monitoring': {'count': 1},
'kafka': {'count': 2},
'postgresql': {'count': 1},
'load_balancer': {'count': 1},
'rabbitmq': {'count': 1}
}
}
}
def test_load_schema_obj():
yaml_obj = load_schema_obj(types.DEFAULT, 'aws', 'configuration/minimal-cluster-config')
assert yaml_obj == TEST_MINIMAL_CLUSTER_CONFIG
def test_load_all_schema_objs():
yaml_objs = load_all_schema_objs(types.DEFAULT, 'aws', 'configuration/minimal-cluster-config')
assert yaml_objs == [TEST_MINIMAL_CLUSTER_CONFIG]
def test_load_all_schema_objs_from_directory():
defaults = load_all_schema_objs_from_directory(types.DEFAULT, 'common', 'configuration')
directory_path = os.path.join(SCHEMA_DIR, 'common', types.DEFAULT, 'configuration')
assert len(defaults) == len(glob.glob(os.path.join(directory_path, '*.yml')))
def test_load_template_file():
template = load_template_file(types.ANSIBLE, '', 'inventory')
content = template.render(inventory=TEST_INVENTORY, cluster_model=TEST_CLUSTER_MODEL)
assert 'test-1 ansible_host=10.0.0.1' in content
assert 'test-2 ansible_host=10.0.0.2' in content
assert 'test-3 ansible_host=10.0.0.3' in content
assert 'test-4 ansible_host=10.0.0.4' in content
assert 'ansible_user=operations' in content
assert 'ansible_ssh_private_key_file=id_rsa' in content
def test_load_json_obj():
loaded_json = load_json_obj(os.path.join(OUTPUT_PATH, TEST_JSON_NAME))
assert loaded_json == TEST_JSON
def test_load_not_existing_manifest_docs():
build_path = get_build_path(NON_EXISTING_CLUSTER)
with pytest.raises(Exception):
load_manifest(build_path)
|
py
|
1a559623ff8be3a978a6f505cf50cf3f1cdba39f
|
import tempfile
from unittest import TestCase
from qtlayoutbuilder.lib.multiline_string_utils import MultilineString
from qtlayoutbuilder.lib.original_file_rewriter import OriginalFileReWriter
class TestOriginalFileReWriter(TestCase):
# Lower level functions first.
def test_add_backup_location_comment(self):
# First check that we get what we expect when the existing
        # one_big_string does not already have such a comment in it.
one_big_string = 'just this text'
mock_backup_folder_string = 'mock_backup_folder'
output = OriginalFileReWriter._add_backup_location_comment(
mock_backup_folder_string, one_big_string)
output = MultilineString.normalise(output)
expected = MultilineString.normalise("""
# This file has been automatically re-formatted.
# Previous versions can be found here:
# mock_backup_folder
##
just this text
""")
        self.assertEqual(output, expected)
# Now ensure that if we do it again - but this time with the
# new one_big_string that already has a comment in, the old comment
# gets replaced with the new.
previous_output = output
mock_backup_folder_string = 'DIFFERENT_mock_backup_folder'
new_output = \
OriginalFileReWriter._add_backup_location_comment(
mock_backup_folder_string, previous_output)
new_output = MultilineString.normalise(new_output)
expected = MultilineString.normalise("""
# This file has been automatically re-formatted.
# Previous versions can be found here:
# DIFFERENT_mock_backup_folder
##
just this text
""")
        self.assertEqual(new_output, expected)
def test_make_backup_of_existing_file(self):
# Make a file that we will then back up.
orig_fd = tempfile.NamedTemporaryFile(delete=False)
orig_file_path = orig_fd.name
orig_fd.write('original file content')
orig_fd.close()
# Back it up
backup_folder, backup_file_path = \
OriginalFileReWriter._make_backup_of_existing_file(orig_file_path)
# Ensure that the backed up file has the expected content.
with open(backup_file_path, 'r') as read_fd:
content = read_fd.read()
self.assertEqual(content, 'original file content')
# Now at API level
def test_at_api_level(self):
# Make a file that we will then overwrite.
orig_fd = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)
orig_file_path = orig_fd.name
content = MultilineString.shift_left("""
layout QHBoxLayout
widget QWidget
""")
orig_fd.write(content)
orig_fd.close()
# Mandate the overwrite
OriginalFileReWriter.overwrite_original(orig_file_path, 'new content')
# Check for both the presence of the new content, and the
# backup message.
with open(orig_file_path, 'r') as input_file:
content = input_file.read()
self.assertTrue('new content' in content)
self.assertTrue('has been' in content)
|
py
|
1a559746bf3a3cadbde1440782fedc9b74a66fa9
|
# todo_list/todo_app/views.py
from django.views.generic import ListView
from .models import ToDoList, ToDoItem
class ListListView(ListView):
model = ToDoList
template_name = "todo_app/index.html"
class ItemListView(ListView):
model = ToDoItem
template_name = "todo_app/todo_list.html"
def get_queryset(self):
return ToDoItem.objects.filter(todo_list_id=self.kwargs["list_id"])
def get_context_data(self):
context = super().get_context_data()
context["todo_list"] = ToDoList.objects.get(id=self.kwargs["list_id"])
return context
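# Illustrative URL wiring (a commented sketch, not part of this file): ItemListView
# above expects a "list_id" URL keyword argument; the module path and route names
# below are assumptions for the example only.
#
# # todo_list/todo_app/urls.py
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path("", views.ListListView.as_view(), name="index"),
#     path("list/<int:list_id>/", views.ItemListView.as_view(), name="list"),
# ]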
|
py
|
1a559799b6988376d9f472dd5d792aaa5e547763
|
from .engine import SimEngine
import logging
l = logging.getLogger(name=__name__)
class SimEngineFailure(SimEngine): #pylint:disable=abstract-method
def _check(self, state, *args, **kwargs):
jumpkind = state.history.jumpkind
if jumpkind in ('Ijk_EmFail', 'Ijk_MapFail') or jumpkind.startswith('Ijk_Sig'):
return True
if jumpkind == 'Ijk_Exit':
return True
return False
def process(self, state, *args, **kwargs):
from ..procedures import SIM_PROCEDURES
if state.history.jumpkind in ("Ijk_EmFail", "Ijk_MapFail") or "Ijk_Sig" in state.history.jumpkind:
raise AngrExitError("Cannot execute following jumpkind %s" % state.history.jumpkind)
elif state.history.jumpkind == 'Ijk_Exit':
l.debug('Execution terminated at %#x', state.addr)
terminator = SIM_PROCEDURES['stubs']['PathTerminator'](project=self.project)
return self.project.factory.procedure_engine.process(state, terminator, force_addr=state.addr)
else:
return SimSuccessors.failure()
from ..errors import AngrExitError
from .successors import SimSuccessors
|
py
|
1a5598de7ef68c669f9ce6b83d23d4cf64f17883
|
from PIL import ImageGrab  # used to take screenshots
# takes a board number from 1-6
def screen_board(board_no):
grab_displays = ((10,50,630,380),
(650,50,1270,380),
(1290,50,1910,380),
(10,590,630,920),
(650,590,1270,920),
(1290,590,1910,920))
    # Screenshot of the display region for board board_no (index board_no-1 in grab_displays)
return ImageGrab.grab(bbox=(grab_displays[board_no-1]))
# takes and returns a screenshot of the region given by coords (x1, y1, x2, y2)
def screenshot(xyxy):
return ImageGrab.grab(bbox=(xyxy))
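# Illustrative usage (assumes a desktop layout matching grab_displays above;
# the board number and output file names are examples only).
if __name__ == "__main__":
    # Capture board 1 and save it for inspection.
    screen_board(1).save("board_1.png")
    # Capture an arbitrary region given as (x1, y1, x2, y2).
    screenshot((0, 0, 200, 200)).save("region.png")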
|
py
|
1a5598f24f9201dbb373f8c66d683279ed097060
|
# -*- coding: utf-8 -*-
settings = {
'source': 'csv',
#'source': 'mongodb',
'data_path': './data',
'stock_commission': 3 / 10000.0,
'future_commission': 1 / 10000.0,
'tick_test': False,
}
class ConfigLog(object):
log_level = 'INFO'
log_to_file = True
log_to_console = True
log_path = './log'
__all__ = ['settings', 'ConfigLog']
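# Illustrative consumption of these settings (a commented sketch; the importing
# module name 'config' is an assumption for the example only):
#
# from config import settings, ConfigLog
#
# data_path = settings['data_path']
# if ConfigLog.log_to_console:
#     print('loading %s data from %s' % (settings['source'], data_path))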
|
py
|
1a5599e35bd63233ef6990d3eafee228e33e1985
|
# _ _
# | | | |
# ___ ___ _ __ ___| |_ __ _ _ __ | |_ ___
# / __/ _ \| '_ \/ __| __/ _` | '_ \| __/ __|
# | (_| (_) | | | \__ \ || (_| | | | | |_\__ \
# \___\___/|_| |_|___/\__\__,_|_| |_|\__|___/
#
"""
constants.py
various fixed values used elsewhere
"""
# place to store downloaded images
DOWNLOAD_DIRECTORY = './downloaded_images/'
# directory to save augmented images before sending to S3
TMP_SAVE_DIR = "./aug_img_tmp/"
# should reflect number of distinct transformations in transformations.py
NUM_POSSIBLE_TRANSFORMS = 6
# redis database host app name
HOST = "redis"
# redis database key for the jobs
JOB_NAME = "job2"
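# Illustrative usage elsewhere in the project (a commented sketch; the queue
# layout and redis-py calls below are assumptions, not taken from this module):
#
# import redis
# from constants import HOST, JOB_NAME
#
# r = redis.Redis(host=HOST)
# r.rpush(JOB_NAME, "path/to/image.jpg")  # producer enqueues an image path
# next_path = r.lpop(JOB_NAME)            # worker pops the next job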
|
py
|
1a559a5382f4f3bab16245590561d1201884f37e
|
import datetime
from typing import List
from unittest.mock import patch
import pytz
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import timezone
from freezegun import freeze_time
from posthog.email import EmailMessage
from posthog.models import Event, Organization, Person, Team, User
from posthog.tasks.email import send_weekly_email_report
class TestEmail(TestCase):
def create_person(self, team: Team, base_distinct_id: str = "") -> Person:
person = Person.objects.create(team=team)
person.add_distinct_id(base_distinct_id)
return person
@freeze_time("2020-09-21")
def setUp(self):
super().setUp()
self.organization = Organization.objects.create()
self.team = Team.objects.create(organization=self.organization, name="The Bakery")
self.user = User.objects.create(email="[email protected]")
self.user2 = User.objects.create(email="[email protected]")
self.organization.members.add(self.user)
self.organization.members.add(self.user2)
last_week = datetime.datetime(2020, 9, 17, 3, 22, tzinfo=pytz.UTC)
two_weeks_ago = datetime.datetime(2020, 9, 8, 19, 54, tzinfo=pytz.UTC)
self.persons: List = [self.create_person(self.team, str(i)) for i in range(0, 7)]
# Resurrected
self.persons[0].created_at = timezone.now() - datetime.timedelta(weeks=3)
self.persons[0].save()
self.persons[1].created_at = timezone.now() - datetime.timedelta(weeks=4)
self.persons[1].save()
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=0)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=1)
# Retained
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=2)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=2)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=3)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=3)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=4)
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=4)
# New
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
Event.objects.create(team=self.team, timestamp=last_week, distinct_id=5)
# Churned
Event.objects.create(team=self.team, timestamp=two_weeks_ago, distinct_id=6)
def test_cant_send_emails_if_not_properly_configured(self) -> None:
with self.settings(EMAIL_HOST=None):
with self.assertRaises(ImproperlyConfigured) as e:
EmailMessage("Subject", "template")
self.assertEqual(
str(e.exception), "Email settings not configured! Set at least the EMAIL_HOST environment variable.",
)
@freeze_time("2020-09-21")
def test_weekly_email_report(self) -> None:
with self.settings(
EMAIL_HOST="localhost", SITE_URL="http://localhost:9999",
):
send_weekly_email_report()
self.assertEqual(len(mail.outbox), 2)
self.assertEqual(mail.outbox[0].to, ["[email protected]"])
self.assertEqual(mail.outbox[1].to, ["[email protected]"])
self.assertEqual(
mail.outbox[0].subject, "PostHog weekly report for Sep 14, 2020 to Sep 20",
)
self.assertEqual(
mail.outbox[0].body, "",
) # no plain-text version support yet
html_message = mail.outbox[0].alternatives[0][0] # type: ignore
self.assertIn(
"http://localhost:9999/static/posthog-logo.png", html_message,
) # absolute URLs are used
self.assertIn('style="font-weight: 300"', html_message) # CSS is inlined
self.assertIn(
"Your PostHog weekly report is ready! Your team had 6 active users last week! 🎉", html_message,
) # preheader
@patch("posthog.tasks.email.EmailMessage")
@freeze_time("2020-09-21")
def test_weekly_email_report_content(self, mock_email_message):
with self.settings(EMAIL_HOST="localhost"):
send_weekly_email_report()
self.assertEqual(
mock_email_message.call_args[0][0], "PostHog weekly report for Sep 14, 2020 to Sep 20",
) # Email subject
self.assertEqual(mock_email_message.call_args[0][1], "weekly_report")
template_context = mock_email_message.call_args[0][2]
self.assertEqual(template_context["team"], "The Bakery")
self.assertEqual(
template_context["period_start"], datetime.datetime(2020, 9, 14, tzinfo=pytz.UTC),
)
self.assertEqual(
template_context["period_end"], datetime.datetime(2020, 9, 20, 23, 59, 59, 999999, tzinfo=pytz.UTC),
)
self.assertEqual(
template_context["active_users"], 6,
)
self.assertEqual(
template_context["active_users_delta"], 0.5,
)
self.assertEqual(
round(template_context["user_distribution"]["new"], 2), 0.17,
)
self.assertEqual(
template_context["user_distribution"]["retained"], 0.5,
)
self.assertEqual(
round(template_context["user_distribution"]["resurrected"], 2), 0.33,
)
self.assertEqual(
template_context["churned_users"], {"abs": 1, "ratio": 0.25, "delta": None},
)
|
py
|
1a559b52ea77ea4ac5e489621120f5578fe4e075
|
#!/usr/bin/env python
"""
self-contained to write legacy storage (pickle/msgpack) files
To use this script. Create an environment where you want
generate pickles, say its for 0.18.1, with your pandas clone
in ~/pandas
. activate pandas_0.18.1
cd ~/
$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \
pandas/pandas/tests/io/data/legacy_pickle/0.18.1/ pickle
This script generates a storage file for the current arch, system,
and python version
pandas version: 0.18.1
output dir : pandas/pandas/tests/io/data/legacy_pickle/0.18.1/
storage format: pickle
created pickle file: 0.18.1_x86_64_darwin_3.5.2.pickle
The idea here is you are using the *current* version of the
generate_legacy_storage_files with an *older* version of pandas to
generate a pickle file. We will then check this file into a current
branch, and test using test_pickle.py. This will load the *older*
pickles and test versus the current data that is generated
(with master). These are then compared.
If we have cases where we changed the signature (e.g. we renamed
offset -> freq in Timestamp), then we have to conditionally execute
in generate_legacy_storage_files.py to make it
run under both the older AND the newer version.
"""
from datetime import timedelta
from distutils.version import LooseVersion
import os
import platform as pl
import sys
import numpy as np
import pandas
from pandas import (
Categorical, DataFrame, Index, MultiIndex, NaT, Period, Series,
SparseDataFrame, SparseSeries, Timestamp, bdate_range, date_range,
period_range, timedelta_range, to_msgpack)
from pandas.tseries.offsets import (
FY5253, BusinessDay, BusinessHour, CustomBusinessDay, DateOffset, Day,
Easter, Hour, LastWeekOfMonth, Minute, MonthBegin, MonthEnd, QuarterBegin,
QuarterEnd, SemiMonthBegin, SemiMonthEnd, Week, WeekOfMonth, YearBegin,
YearEnd)
_loose_version = LooseVersion(pandas.__version__)
def _create_sp_series():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
bseries = SparseSeries(arr, kind='block')
bseries.name = 'bseries'
return bseries
def _create_sp_tsseries():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range('1/1/2011', periods=len(arr))
bseries = SparseSeries(arr, index=date_index, kind='block')
bseries.name = 'btsseries'
return bseries
def _create_sp_frame():
nan = np.nan
data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
'C': np.arange(10).astype(np.int64),
'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
dates = bdate_range('1/1/2011', periods=10)
return SparseDataFrame(data, index=dates)
def create_data():
""" create the pickle/msgpack data """
data = {
'A': [0., 1., 2., 3., np.nan],
'B': [0, 1, 0, 1, 0],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': date_range('1/1/2009', periods=5),
'E': [0., 1, Timestamp('20100101'), 'foo', 2.]
}
scalars = dict(timestamp=Timestamp('20130101'),
period=Period('2012', 'M'))
index = dict(int=Index(np.arange(10)),
date=date_range('20130101', periods=10),
period=period_range('2013-01-01', freq='M', periods=10),
float=Index(np.arange(10, dtype=np.float64)),
uint=Index(np.arange(10, dtype=np.uint64)),
timedelta=timedelta_range('00:00:00', freq='30T', periods=10))
if _loose_version >= LooseVersion('0.18'):
from pandas import RangeIndex
index['range'] = RangeIndex(10)
if _loose_version >= LooseVersion('0.21'):
from pandas import interval_range
index['interval'] = interval_range(0, periods=10)
mi = dict(reg2=MultiIndex.from_tuples(
tuple(zip(*[['bar', 'bar', 'baz', 'baz', 'foo',
'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one',
'two', 'one', 'two']])),
names=['first', 'second']))
series = dict(float=Series(data['A']),
int=Series(data['B']),
mixed=Series(data['E']),
ts=Series(np.arange(10).astype(np.int64),
index=date_range('20130101', periods=10)),
mi=Series(np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(
tuple(zip(*[[1, 1, 2, 2, 2],
[3, 4, 3, 4, 5]])),
names=['one', 'two'])),
dup=Series(np.arange(5).astype(np.float64),
index=['A', 'B', 'C', 'D', 'A']),
cat=Series(Categorical(['foo', 'bar', 'baz'])),
dt=Series(date_range('20130101', periods=5)),
dt_tz=Series(date_range('20130101', periods=5,
tz='US/Eastern')),
period=Series([Period('2000Q1')] * 5))
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list("ABCDA")
frame = dict(float=DataFrame({'A': series['float'],
'B': series['float'] + 1}),
int=DataFrame({'A': series['int'],
'B': series['int'] + 1}),
mixed=DataFrame({k: data[k]
for k in ['A', 'B', 'C', 'D']}),
mi=DataFrame({'A': np.arange(5).astype(np.float64),
'B': np.arange(5).astype(np.int64)},
index=MultiIndex.from_tuples(
tuple(zip(*[['bar', 'bar', 'baz',
'baz', 'baz'],
['one', 'two', 'one',
'two', 'three']])),
names=['first', 'second'])),
dup=DataFrame(np.arange(15).reshape(5, 3).astype(np.float64),
columns=['A', 'B', 'A']),
cat_onecol=DataFrame({'A': Categorical(['foo', 'bar'])}),
cat_and_float=DataFrame({
'A': Categorical(['foo', 'bar', 'baz']),
'B': np.arange(3).astype(np.int64)}),
mixed_dup=mixed_dup_df,
dt_mixed_tzs=DataFrame({
'A': Timestamp('20130102', tz='US/Eastern'),
'B': Timestamp('20130603', tz='CET')}, index=range(5)),
dt_mixed2_tzs=DataFrame({
'A': Timestamp('20130102', tz='US/Eastern'),
'B': Timestamp('20130603', tz='CET'),
'C': Timestamp('20130603', tz='UTC')}, index=range(5))
)
cat = dict(int8=Categorical(list('abcdefg')),
int16=Categorical(np.arange(1000)),
int32=Categorical(np.arange(10000)))
timestamp = dict(normal=Timestamp('2011-01-01'),
nat=NaT,
tz=Timestamp('2011-01-01', tz='US/Eastern'))
if _loose_version < LooseVersion('0.19.2'):
timestamp['freq'] = Timestamp('2011-01-01', offset='D')
timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo',
offset='M')
else:
timestamp['freq'] = Timestamp('2011-01-01', freq='D')
timestamp['both'] = Timestamp('2011-01-01', tz='Asia/Tokyo',
freq='M')
off = {'DateOffset': DateOffset(years=1),
'DateOffset_h_ns': DateOffset(hour=6, nanoseconds=5824),
'BusinessDay': BusinessDay(offset=timedelta(seconds=9)),
'BusinessHour': BusinessHour(normalize=True, n=6, end='15:14'),
'CustomBusinessDay': CustomBusinessDay(weekmask='Mon Fri'),
'SemiMonthBegin': SemiMonthBegin(day_of_month=9),
'SemiMonthEnd': SemiMonthEnd(day_of_month=24),
'MonthBegin': MonthBegin(1),
'MonthEnd': MonthEnd(1),
'QuarterBegin': QuarterBegin(1),
'QuarterEnd': QuarterEnd(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'YearEnd': YearEnd(1),
'Week': Week(1),
'Week_Tues': Week(2, normalize=False, weekday=1),
'WeekOfMonth': WeekOfMonth(week=3, weekday=4),
'LastWeekOfMonth': LastWeekOfMonth(n=1, weekday=3),
'FY5253': FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
'Easter': Easter(),
'Hour': Hour(1),
'Minute': Minute(1)}
return dict(series=series,
frame=frame,
index=index,
scalars=scalars,
mi=mi,
sp_series=dict(float=_create_sp_series(),
ts=_create_sp_tsseries()),
sp_frame=dict(float=_create_sp_frame()),
cat=cat,
timestamp=timestamp,
offsets=off)
def create_pickle_data():
data = create_data()
# Pre-0.14.1 versions generated non-unpicklable mixed-type frames and
# panels if their columns/items were non-unique.
if _loose_version < LooseVersion('0.14.1'):
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
if _loose_version < LooseVersion('0.17.0'):
del data['series']['period']
del data['scalars']['period']
return data
def _u(x):
return {k: _u(x[k]) for k in x} if isinstance(x, dict) else x
def create_msgpack_data():
data = create_data()
if _loose_version < LooseVersion('0.17.0'):
del data['frame']['mixed_dup']
del data['panel']['mixed_dup']
del data['frame']['dup']
del data['panel']['dup']
if _loose_version < LooseVersion('0.18.0'):
del data['series']['dt_tz']
del data['frame']['dt_mixed_tzs']
# Not supported
del data['sp_series']
del data['sp_frame']
del data['series']['cat']
del data['series']['period']
del data['frame']['cat_onecol']
del data['frame']['cat_and_float']
del data['scalars']['period']
if _loose_version < LooseVersion('0.23.0'):
del data['index']['interval']
del data['offsets']
return _u(data)
def platform_name():
return '_'.join([str(pandas.__version__), str(pl.machine()),
str(pl.system().lower()), str(pl.python_version())])
def write_legacy_pickles(output_dir):
# make sure we are < 0.13 compat (in py3)
try:
from pandas.compat import cPickle as pickle # noqa
except ImportError:
import pickle
version = pandas.__version__
print("This script generates a storage file for the current arch, system, "
"and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: pickle")
pth = '{0}.pickle'.format(platform_name())
fh = open(os.path.join(output_dir, pth), 'wb')
pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
fh.close()
print("created pickle file: %s" % pth)
def write_legacy_msgpack(output_dir, compress):
version = pandas.__version__
print("This script generates a storage file for the current arch, "
"system, and python version")
print(" pandas version: {0}".format(version))
print(" output dir : {0}".format(output_dir))
print(" storage format: msgpack")
pth = '{0}.msgpack'.format(platform_name())
to_msgpack(os.path.join(output_dir, pth), create_msgpack_data(),
compress=compress)
print("created msgpack file: %s" % pth)
def write_legacy_file():
# force our cwd to be the first searched
sys.path.insert(0, '.')
if not (3 <= len(sys.argv) <= 4):
exit("Specify output directory and storage type: generate_legacy_"
"storage_files.py <output_dir> <storage_type> "
"<msgpack_compress_type>")
output_dir = str(sys.argv[1])
storage_type = str(sys.argv[2])
try:
compress_type = str(sys.argv[3])
except IndexError:
compress_type = None
if storage_type == 'pickle':
write_legacy_pickles(output_dir=output_dir)
elif storage_type == 'msgpack':
write_legacy_msgpack(output_dir=output_dir, compress=compress_type)
else:
exit("storage_type must be one of {'pickle', 'msgpack'}")
if __name__ == '__main__':
write_legacy_file()
|
py
|
1a559c8296a8f361e14145548aad230587540917
|
"""Given a folder with subfolders, run the schizo test and report on the min,
max, etc statistics of each subfolder.
"""
import argparse
import collections
import multiprocessing
import os
import re
import subprocess
progdir = os.path.dirname(os.path.abspath(__file__))
mainscript = os.path.join(progdir, '../main.py')
def dir_arg(s):
if os.path.isdir(s):
return s
    raise argparse.ArgumentTypeError(f'{s} is not a directory')
def get_rmse(fpath):
p = subprocess.Popen(['python3', mainscript, fpath],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
r = p.wait()
vals = {}
for m in re.finditer(rb'^Max RMSE, (.*) to (.*): ([^\n]+)', stderr, flags=re.M):
vals[(m.group(1), m.group(2))] = float(m.group(3))
return vals
def main():
ap = argparse.ArgumentParser(description=__doc__)
ap.add_argument('folder', type=dir_arg)
args = ap.parse_args()
pool = multiprocessing.Pool()
all_files = []
for path, dirs, files in os.walk(args.folder):
for f in files:
fpath = os.path.join(path, f)
if os.path.isfile(fpath):
all_files.append(fpath)
results = pool.map(get_rmse, all_files)
rmses = sorted(zip(all_files, results), key=lambda k: k[0])
folders = []
class Tracker:
def __init__(self):
self.items = collections.defaultdict(list)
def __str__(self):
result = []
for (t1, t2), vals in self.items.items():
result.append(f'{t1} to {t2}: {sum(vals) / len(vals):.4f} ({min(vals):.4f} / {max(vals):.4f})')
return ', '.join(result)
def add(self, rmse_vals):
for k, v in rmse_vals.items():
self.items[k].append(v)
def cap(parts, rmse):
for fi in reversed(range(len(folders))):
if len(parts) <= fi or parts[fi] != folders[fi][0]:
fname = '/'.join([ff[0] for ff in folders])
fstr = "\n ".join(str(folders[fi][1]).split(", "))
print(f'{fname}\n {fstr}')
folders.pop()
else:
break
for fi in range(len(folders), len(parts)):
folders.append([parts[fi], Tracker()])
for f in folders:
f[1].add(rmse)
for f, r in rmses:
remainder = f
parts = []
while True:
remainder, tail = os.path.split(remainder)
if not tail:
break
parts.insert(0, tail)
if not remainder:
break
cap(parts, r)
cap([], None)
if __name__ == '__main__':
main()
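# Illustrative invocation (the script and folder names are examples only):
#
#   python3 run_folder_stats.py path/to/dataset_root
#
# For each subfolder this prints, per transform pair parsed from main.py's
# "Max RMSE" output lines, the mean RMSE followed by (min / max).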
|
py
|
1a559f46668c26a67dcfdb505cd887a807690162
|
from __future__ import absolute_import, print_function
from django.conf.urls import include, patterns, url
from .endpoints.accept_project_transfer import AcceptProjectTransferEndpoint
from .endpoints.organization_dashboards import OrganizationDashboardsEndpoint
from .endpoints.relay_heartbeat import RelayHeartbeatEndpoint
from .endpoints.relay_projectconfigs import RelayProjectConfigsEndpoint
from .endpoints.relay_publickeys import RelayPublicKeysEndpoint
from .endpoints.relay_index import RelayIndexEndpoint
from .endpoints.relay_details import RelayDetailsEndpoint
from .endpoints.relay_register import RelayRegisterChallengeEndpoint, \
RelayRegisterResponseEndpoint
from .endpoints.api_applications import ApiApplicationsEndpoint
from .endpoints.api_application_details import ApiApplicationDetailsEndpoint
from .endpoints.api_authorizations import ApiAuthorizationsEndpoint
from .endpoints.api_tokens import ApiTokensEndpoint
from .endpoints.assistant import AssistantEndpoint
from .endpoints.auth_index import AuthIndexEndpoint
from .endpoints.authenticator_index import AuthenticatorIndexEndpoint
from .endpoints.broadcast_details import BroadcastDetailsEndpoint
from .endpoints.broadcast_index import BroadcastIndexEndpoint
from .endpoints.catchall import CatchallEndpoint
from .endpoints.chunk import ChunkUploadEndpoint
from .endpoints.event_attachment_details import EventAttachmentDetailsEndpoint
from .endpoints.event_attachments import EventAttachmentsEndpoint
from .endpoints.event_details import EventDetailsEndpoint
from .endpoints.event_owners import EventOwnersEndpoint
from .endpoints.event_apple_crash_report import EventAppleCrashReportEndpoint
from .endpoints.event_grouping_info import EventGroupingInfoEndpoint
from .endpoints.group_details import GroupDetailsEndpoint
from .endpoints.group_events import GroupEventsEndpoint
from .endpoints.group_events_latest import GroupEventsLatestEndpoint
from .endpoints.group_events_oldest import GroupEventsOldestEndpoint
from .endpoints.group_hashes import GroupHashesEndpoint
from .endpoints.group_integration_details import GroupIntegrationDetailsEndpoint
from .endpoints.group_integrations import GroupIntegrationsEndpoint
from .endpoints.group_notes import GroupNotesEndpoint
from .endpoints.group_notes_details import GroupNotesDetailsEndpoint
from .endpoints.group_participants import GroupParticipantsEndpoint
from .endpoints.group_external_issues import GroupExternalIssuesEndpoint
from .endpoints.group_external_issue_details import GroupExternalIssueDetailsEndpoint
from .endpoints.group_similar_issues import GroupSimilarIssuesEndpoint
from .endpoints.group_stats import GroupStatsEndpoint
from .endpoints.group_tags import GroupTagsEndpoint
from .endpoints.group_tagkey_details import GroupTagKeyDetailsEndpoint
from .endpoints.group_tagkey_values import GroupTagKeyValuesEndpoint
from .endpoints.group_tombstone_details import GroupTombstoneDetailsEndpoint
from .endpoints.group_tombstone import GroupTombstoneEndpoint
from .endpoints.group_user_reports import GroupUserReportsEndpoint
from .endpoints.organization_incident_details import OrganizationIncidentDetailsEndpoint
from .endpoints.organization_incident_seen import OrganizationIncidentSeenEndpoint
from .endpoints.index import IndexEndpoint
from .endpoints.internal_queue_tasks import InternalQueueTasksEndpoint
from .endpoints.internal_quotas import InternalQuotasEndpoint
from .endpoints.internal_stats import InternalStatsEndpoint
from .endpoints.monitor_checkins import MonitorCheckInsEndpoint
from .endpoints.monitor_checkin_details import MonitorCheckInDetailsEndpoint
from .endpoints.monitor_details import MonitorDetailsEndpoint
from .endpoints.monitor_stats import MonitorStatsEndpoint
from .endpoints.organization_access_request_details import OrganizationAccessRequestDetailsEndpoint
from .endpoints.organization_activity import OrganizationActivityEndpoint
from .endpoints.organization_auditlogs import OrganizationAuditLogsEndpoint
from .endpoints.organization_api_key_index import OrganizationApiKeyIndexEndpoint
from .endpoints.organization_api_key_details import OrganizationApiKeyDetailsEndpoint
from .endpoints.organization_auth_providers import OrganizationAuthProvidersEndpoint
from .endpoints.organization_auth_provider_details import OrganizationAuthProviderDetailsEndpoint
from .endpoints.organization_auth_provider_send_reminders import OrganizationAuthProviderSendRemindersEndpoint
from .endpoints.organization_avatar import OrganizationAvatarEndpoint
from .endpoints.organization_details import OrganizationDetailsEndpoint
from .endpoints.organization_discover_query import OrganizationDiscoverQueryEndpoint
from .endpoints.organization_discover_saved_queries import OrganizationDiscoverSavedQueriesEndpoint
from .endpoints.organization_discover_saved_query_detail import OrganizationDiscoverSavedQueryDetailEndpoint
from .endpoints.organization_events import OrganizationEventsEndpoint, OrganizationEventsMetaEndpoint, OrganizationEventsStatsEndpoint, OrganizationEventsHeatmapEndpoint
from .endpoints.organization_group_index import OrganizationGroupIndexEndpoint
from .endpoints.organization_dashboard_details import OrganizationDashboardDetailsEndpoint
from .endpoints.organization_dashboard_widget_details import OrganizationDashboardWidgetDetailsEndpoint
from .endpoints.organization_dashboard_widgets import OrganizationDashboardWidgetsEndpoint
from .endpoints.organization_health import OrganizationHealthTopEndpoint, OrganizationHealthGraphEndpoint
from .endpoints.organization_shortid import ShortIdLookupEndpoint
from .endpoints.organization_environments import OrganizationEnvironmentsEndpoint
from .endpoints.organization_eventid import EventIdLookupEndpoint
from .endpoints.organization_slugs import SlugsUpdateEndpoint
from .endpoints.organization_incident_activity_index import OrganizationIncidentActivityIndexEndpoint
from .endpoints.organization_incident_comment_index import OrganizationIncidentCommentIndexEndpoint
from .endpoints.organization_incident_comment_details import OrganizationIncidentCommentDetailsEndpoint
from .endpoints.organization_incident_index import OrganizationIncidentIndexEndpoint
from .endpoints.organization_incident_subscription_index import OrganizationIncidentSubscriptionIndexEndpoint
from .endpoints.organization_incident_suspects_index import OrganizationIncidentSuspectsIndexEndpoint
from .endpoints.organization_issues_new import OrganizationIssuesNewEndpoint
from .endpoints.organization_issues_resolved_in_release import OrganizationIssuesResolvedInReleaseEndpoint
from .endpoints.organization_member_details import OrganizationMemberDetailsEndpoint
from .endpoints.organization_member_index import OrganizationMemberIndexEndpoint
from .endpoints.organization_member_issues_assigned import OrganizationMemberIssuesAssignedEndpoint
from .endpoints.organization_member_issues_bookmarked import OrganizationMemberIssuesBookmarkedEndpoint
from .endpoints.organization_member_issues_viewed import OrganizationMemberIssuesViewedEndpoint
from .endpoints.organization_member_unreleased_commits import OrganizationMemberUnreleasedCommitsEndpoint
from .endpoints.organization_member_team_details import OrganizationMemberTeamDetailsEndpoint
from .endpoints.organization_monitors import OrganizationMonitorsEndpoint
from .endpoints.organization_onboarding_tasks import OrganizationOnboardingTaskEndpoint
from .endpoints.organization_index import OrganizationIndexEndpoint
from .endpoints.organization_pinned_searches import OrganizationPinnedSearchEndpoint
from .endpoints.organization_plugins import OrganizationPluginsEndpoint
from .endpoints.organization_processingissues import OrganizationProcessingIssuesEndpoint
from .endpoints.organization_projects import OrganizationProjectsEndpoint
from .endpoints.organization_recent_searches import OrganizationRecentSearchesEndpoint
from .endpoints.organization_releases import OrganizationReleasesEndpoint
from .endpoints.organization_release_details import OrganizationReleaseDetailsEndpoint
from .endpoints.organization_release_assemble import OrganizationReleaseAssembleEndpoint
from .endpoints.organization_release_files import OrganizationReleaseFilesEndpoint
from .endpoints.organization_release_file_details import OrganizationReleaseFileDetailsEndpoint
from .endpoints.organization_release_commits import OrganizationReleaseCommitsEndpoint
from .endpoints.organization_repositories import OrganizationRepositoriesEndpoint
from .endpoints.organization_integration_details import OrganizationIntegrationDetailsEndpoint
from .endpoints.organization_integration_repos import OrganizationIntegrationReposEndpoint
from .endpoints.organization_integrations import OrganizationIntegrationsEndpoint
from .endpoints.organization_config_integrations import OrganizationConfigIntegrationsEndpoint
from .endpoints.organization_config_repositories import OrganizationConfigRepositoriesEndpoint
from .endpoints.organization_repository_commits import OrganizationRepositoryCommitsEndpoint
from .endpoints.organization_repository_details import OrganizationRepositoryDetailsEndpoint
from .endpoints.organization_search_details import OrganizationSearchDetailsEndpoint
from .endpoints.organization_searches import OrganizationSearchesEndpoint
from .endpoints.organization_sentry_apps import OrganizationSentryAppsEndpoint
from .endpoints.organization_tagkey_values import OrganizationTagKeyValuesEndpoint
from .endpoints.organization_tags import OrganizationTagsEndpoint
from .endpoints.organization_user_reports import OrganizationUserReportsEndpoint
from .endpoints.organization_users import OrganizationUsersEndpoint
from .endpoints.organization_user_details import OrganizationUserDetailsEndpoint
from .endpoints.sentry_app_installations import SentryAppInstallationsEndpoint
from .endpoints.sentry_app_installation_details import SentryAppInstallationDetailsEndpoint
from .endpoints.sentry_app_installation_external_requests import SentryAppInstallationExternalRequestsEndpoint
from .endpoints.sentry_app_installation_external_issues import SentryAppInstallationExternalIssuesEndpoint
from .endpoints.organization_stats import OrganizationStatsEndpoint
from .endpoints.organization_teams import OrganizationTeamsEndpoint
from .endpoints.organization_user_issues import OrganizationUserIssuesEndpoint
from .endpoints.organization_user_issues_search import OrganizationUserIssuesSearchEndpoint
from .endpoints.project_avatar import ProjectAvatarEndpoint
from .endpoints.project_details import ProjectDetailsEndpoint
from .endpoints.project_transfer import ProjectTransferEndpoint
from .endpoints.project_create_sample import ProjectCreateSampleEndpoint
from .endpoints.project_docs_platform import ProjectDocsPlatformEndpoint
from .endpoints.project_environments import ProjectEnvironmentsEndpoint
from .endpoints.project_environment_details import ProjectEnvironmentDetailsEndpoint
from .endpoints.project_platforms import ProjectPlatformsEndpoint
from .endpoints.project_events import ProjectEventsEndpoint
from .endpoints.project_event_details import ProjectEventDetailsEndpoint, EventJsonEndpoint
from .endpoints.project_filters import ProjectFiltersEndpoint
from .endpoints.project_filter_details import ProjectFilterDetailsEndpoint
from .endpoints.project_group_index import ProjectGroupIndexEndpoint
from .endpoints.project_group_stats import ProjectGroupStatsEndpoint
from .endpoints.project_index import ProjectIndexEndpoint
from .endpoints.project_issues_resolved_in_release import ProjectIssuesResolvedInReleaseEndpoint
from .endpoints.project_keys import ProjectKeysEndpoint
from .endpoints.project_key_details import ProjectKeyDetailsEndpoint
from .endpoints.project_key_stats import ProjectKeyStatsEndpoint
from .endpoints.project_member_index import ProjectMemberIndexEndpoint
from .endpoints.project_ownership import ProjectOwnershipEndpoint
from .endpoints.project_plugins import ProjectPluginsEndpoint
from .endpoints.project_plugin_details import ProjectPluginDetailsEndpoint
from .endpoints.project_release_details import ProjectReleaseDetailsEndpoint
from .endpoints.project_release_files import ProjectReleaseFilesEndpoint
from .endpoints.project_release_file_details import ProjectReleaseFileDetailsEndpoint
from .endpoints.project_release_commits import ProjectReleaseCommitsEndpoint
from .endpoints.project_releases import ProjectReleasesEndpoint
from .endpoints.project_release_setup import ProjectReleaseSetupCompletionEndpoint
from .endpoints.project_releases_token import ProjectReleasesTokenEndpoint
from .endpoints.project_rules import ProjectRulesEndpoint
from .endpoints.project_rules_configuration import ProjectRulesConfigurationEndpoint
from .endpoints.project_rule_details import ProjectRuleDetailsEndpoint
from .endpoints.project_searches import ProjectSearchesEndpoint
from .endpoints.project_search_details import ProjectSearchDetailsEndpoint
from .endpoints.project_stats import ProjectStatsEndpoint
from .endpoints.project_tags import ProjectTagsEndpoint
from .endpoints.project_tagkey_details import ProjectTagKeyDetailsEndpoint
from .endpoints.project_tagkey_values import ProjectTagKeyValuesEndpoint
from .endpoints.project_team_details import ProjectTeamDetailsEndpoint
from .endpoints.project_teams import ProjectTeamsEndpoint
from .endpoints.project_processingissues import ProjectProcessingIssuesEndpoint, \
ProjectProcessingIssuesFixEndpoint, ProjectProcessingIssuesDiscardEndpoint
from .endpoints.project_reprocessing import ProjectReprocessingEndpoint
from .endpoints.project_servicehooks import ProjectServiceHooksEndpoint
from .endpoints.project_servicehook_details import ProjectServiceHookDetailsEndpoint
from .endpoints.project_servicehook_stats import ProjectServiceHookStatsEndpoint
from .endpoints.project_user_details import ProjectUserDetailsEndpoint
from .endpoints.project_user_reports import ProjectUserReportsEndpoint
from .endpoints.project_user_stats import ProjectUserStatsEndpoint
from .endpoints.project_users import ProjectUsersEndpoint
from .endpoints.prompts_activity import PromptsActivityEndpoint
from .endpoints.filechange import CommitFileChangeEndpoint
from .endpoints.release_deploys import ReleaseDeploysEndpoint
from .endpoints.debug_files import DebugFilesEndpoint, DifAssembleEndpoint, \
UnknownDebugFilesEndpoint, AssociateDSymFilesEndpoint
from .endpoints.sentry_apps import SentryAppsEndpoint
from .endpoints.sentry_app_features import SentryAppFeaturesEndpoint
from .endpoints.sentry_apps_stats import SentryAppsStatsEndpoint
from .endpoints.sentry_app_components import SentryAppComponentsEndpoint, \
OrganizationSentryAppComponentsEndpoint
from .endpoints.sentry_app_details import SentryAppDetailsEndpoint
from .endpoints.sentry_app_authorizations import SentryAppAuthorizationsEndpoint
from .endpoints.shared_group_details import SharedGroupDetailsEndpoint
from .endpoints.system_health import SystemHealthEndpoint
from .endpoints.system_options import SystemOptionsEndpoint
from .endpoints.team_avatar import TeamAvatarEndpoint
from .endpoints.team_details import TeamDetailsEndpoint
from .endpoints.team_groups_new import TeamGroupsNewEndpoint
from .endpoints.team_groups_trending import TeamGroupsTrendingEndpoint
from .endpoints.team_members import TeamMembersEndpoint
from .endpoints.team_projects import TeamProjectsEndpoint
from .endpoints.team_stats import TeamStatsEndpoint
from .endpoints.useravatar import UserAvatarEndpoint
from .endpoints.user_appearance import UserAppearanceEndpoint
from .endpoints.user_authenticator_index import UserAuthenticatorIndexEndpoint
from .endpoints.user_authenticator_enroll import UserAuthenticatorEnrollEndpoint
from .endpoints.user_authenticator_details import UserAuthenticatorDetailsEndpoint
from .endpoints.user_identity_details import UserIdentityDetailsEndpoint
from .endpoints.user_index import UserIndexEndpoint
from .endpoints.user_details import UserDetailsEndpoint
from .endpoints.user_emails import UserEmailsEndpoint
from .endpoints.user_emails_confirm import UserEmailsConfirmEndpoint
from .endpoints.user_ips import UserIPsEndpoint
from .endpoints.user_organizations import UserOrganizationsEndpoint
from .endpoints.user_notification_details import UserNotificationDetailsEndpoint
from .endpoints.user_password import UserPasswordEndpoint
from .endpoints.user_notification_fine_tuning import UserNotificationFineTuningEndpoint
from .endpoints.user_social_identities_index import UserSocialIdentitiesIndexEndpoint
from .endpoints.user_social_identity_details import UserSocialIdentityDetailsEndpoint
from .endpoints.user_subscriptions import UserSubscriptionsEndpoint
from .endpoints.event_file_committers import EventFileCommittersEndpoint
from .endpoints.setup_wizard import SetupWizard
from .endpoints.grouping_configs import GroupingConfigsEndpoint
from .endpoints.grouping_enhancements import GroupingEnhancementsEndpoint
from .endpoints.builtin_symbol_sources import BuiltinSymbolSourcesEndpoint
urlpatterns = patterns(
'',
# Relay
url(
r'^relays/$',
RelayIndexEndpoint.as_view(),
name='sentry-api-0-relays-index'
),
url(
r'^relays/register/challenge/$',
RelayRegisterChallengeEndpoint.as_view(),
name='sentry-api-0-relay-register-challenge'
),
url(
r'^relays/register/response/$',
RelayRegisterResponseEndpoint.as_view(),
name='sentry-api-0-relay-register-response'
),
url(
r'^relays/heartbeat/$',
RelayHeartbeatEndpoint.as_view(),
name='sentry-api-0-relay-heartbeat'
),
url(
r'^relays/projectconfigs/$',
RelayProjectConfigsEndpoint.as_view(),
name='sentry-api-0-relay-projectconfigs'
),
url(
r'^relays/publickeys/$',
RelayPublicKeysEndpoint.as_view(),
name='sentry-api-0-relay-publickeys'
),
url(
r'^relays/(?P<relay_id>[^\/]+)/$',
RelayDetailsEndpoint.as_view(),
name='sentry-api-0-relays-details'
),
# Api Data
url(
r'^assistant/$',
AssistantEndpoint.as_view(),
name='sentry-api-0-assistant',
),
url(
r'^api-applications/$',
ApiApplicationsEndpoint.as_view(),
name='sentry-api-0-api-applications'
),
url(
r'^api-applications/(?P<app_id>[^\/]+)/$',
ApiApplicationDetailsEndpoint.as_view(),
name='sentry-api-0-api-application-details'
),
url(
r'^api-authorizations/$',
ApiAuthorizationsEndpoint.as_view(),
name='sentry-api-0-api-authorizations'
),
url(r'^api-tokens/$', ApiTokensEndpoint.as_view(),
name='sentry-api-0-api-tokens'),
url(
r'^promptsactivity/$',
PromptsActivityEndpoint.as_view(),
name='sentry-api-0-promptsactivity',
),
# Auth
url(r'^auth/$', AuthIndexEndpoint.as_view(), name='sentry-api-0-auth'),
    # List Authenticators
url(r'^authenticators/$',
AuthenticatorIndexEndpoint.as_view(),
name='sentry-api-0-authenticator-index'),
# Broadcasts
url(r'^broadcasts/$', BroadcastIndexEndpoint.as_view(),
name='sentry-api-0-broadcast-index'),
url(r'^broadcasts/(?P<broadcast_id>[^\/]+)/$', BroadcastDetailsEndpoint.as_view()),
# Project transfer
url(r'^accept-transfer/$', AcceptProjectTransferEndpoint.as_view(),
name='sentry-api-0-accept-project-transfer'),
# Monitors
url(r'^monitors/(?P<monitor_id>[^\/]+)/$', MonitorDetailsEndpoint.as_view()),
url(r'^monitors/(?P<monitor_id>[^\/]+)/checkins/$', MonitorCheckInsEndpoint.as_view()),
url(r'^monitors/(?P<monitor_id>[^\/]+)/checkins/(?P<checkin_id>[^\/]+)/$',
MonitorCheckInDetailsEndpoint.as_view()),
url(r'^monitors/(?P<monitor_id>[^\/]+)/stats/$', MonitorStatsEndpoint.as_view()),
# Users
url(r'^users/$', UserIndexEndpoint.as_view(), name='sentry-api-0-user-index'),
url(
r'^users/(?P<user_id>[^\/]+)/$',
UserDetailsEndpoint.as_view(),
name='sentry-api-0-user-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/avatar/$',
UserAvatarEndpoint.as_view(),
name='sentry-api-0-user-avatar'
),
url(
r'^users/(?P<user_id>[^\/]+)/appearance/$',
UserAppearanceEndpoint.as_view(),
name='sentry-api-0-user-appearance'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/$',
UserAuthenticatorIndexEndpoint.as_view(),
name='sentry-api-0-user-authenticator-index'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<interface_id>[^\/]+)/enroll/$',
UserAuthenticatorEnrollEndpoint.as_view(),
name='sentry-api-0-user-authenticator-enroll'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<auth_id>[^\/]+)/(?P<interface_device_id>[^\/]+)/$',
UserAuthenticatorDetailsEndpoint.as_view(),
name='sentry-api-0-user-authenticator-device-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/authenticators/(?P<auth_id>[^\/]+)/$',
UserAuthenticatorDetailsEndpoint.as_view(),
name='sentry-api-0-user-authenticator-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/emails/$',
UserEmailsEndpoint.as_view(),
name='sentry-api-0-user-emails'
),
url(
r'^users/(?P<user_id>[^\/]+)/emails/confirm/$',
UserEmailsConfirmEndpoint.as_view(),
name='sentry-api-0-user-emails-confirm'
),
url(
r'^users/(?P<user_id>[^\/]+)/identities/(?P<identity_id>[^\/]+)/$',
UserIdentityDetailsEndpoint.as_view(),
name='sentry-api-0-user-identity-details'
),
url(
r'^users/(?P<user_id>[^\/]+)/ips/$',
UserIPsEndpoint.as_view(),
name='sentry-api-0-user-ips'
),
url(
r'^users/(?P<user_id>[^\/]+)/organizations/$',
UserOrganizationsEndpoint.as_view(),
name='sentry-api-0-user-organizations'
),
url(
r'^users/(?P<user_id>[^\/]+)/notifications/$',
UserNotificationDetailsEndpoint.as_view(),
name='sentry-api-0-user-notifications'
),
url(
r'^users/(?P<user_id>[^\/]+)/password/$',
UserPasswordEndpoint.as_view(),
name='sentry-api-0-user-password'
),
url(
r'^users/(?P<user_id>[^\/]+)/notifications/(?P<notification_type>[^\/]+)/$',
UserNotificationFineTuningEndpoint.as_view(),
name='sentry-api-0-user-notifications-fine-tuning'
),
url(
r'^users/(?P<user_id>[^\/]+)/social-identities/$',
UserSocialIdentitiesIndexEndpoint.as_view(),
name='sentry-api-0-user-social-identities-index'),
url(
r'^users/(?P<user_id>[^\/]+)/social-identities/(?P<identity_id>[^\/]+)/$',
UserSocialIdentityDetailsEndpoint.as_view(),
name='sentry-api-0-user-social-identity-details'),
url(
r'^users/(?P<user_id>[^\/]+)/subscriptions/$',
UserSubscriptionsEndpoint.as_view(),
name='sentry-api-0-user-subscriptions'
),
# Incidents
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/activity/$',
OrganizationIncidentActivityIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-activity'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/comments/$',
OrganizationIncidentCommentIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-comments'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/comments/(?P<activity_id>[^\/]+)/$',
OrganizationIncidentCommentDetailsEndpoint.as_view(),
name='sentry-api-0-organization-incident-comment-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/$',
OrganizationIncidentDetailsEndpoint.as_view(),
name='sentry-api-0-organization-incident-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/$',
OrganizationIncidentIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/seen/$',
OrganizationIncidentSeenEndpoint.as_view(),
name='sentry-api-0-organization-incident-seen'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/subscriptions/$',
OrganizationIncidentSubscriptionIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-subscription-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/incidents/(?P<incident_identifier>[^\/]+)/suspects/$',
OrganizationIncidentSuspectsIndexEndpoint.as_view(),
name='sentry-api-0-organization-incident-suspect-index'
),
# Organizations
url(
r'^organizations/(?P<organization_slug>[^\/]+)/chunk-upload/$',
ChunkUploadEndpoint.as_view(),
name='sentry-api-0-chunk-upload'
),
url(
r'^organizations/$', OrganizationIndexEndpoint.as_view(), name='sentry-api-0-organizations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/$',
OrganizationDetailsEndpoint.as_view(),
name='sentry-api-0-organization-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/discover/query/$',
OrganizationDiscoverQueryEndpoint.as_view(),
name='sentry-api-0-organization-discover-query'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/discover/saved/$',
OrganizationDiscoverSavedQueriesEndpoint.as_view(),
name='sentry-api-0-organization-discover-saved-queries'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/discover/saved/(?P<query_id>[^\/]+)/$',
OrganizationDiscoverSavedQueryDetailEndpoint.as_view(),
name='sentry-api-0-organization-discover-saved-query-detail'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/dashboards/(?P<dashboard_id>[^\/]+)/$',
OrganizationDashboardDetailsEndpoint.as_view(),
name='sentry-api-0-organization-dashboard-details',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/dashboards/$',
OrganizationDashboardsEndpoint.as_view(),
name='sentry-api-0-organization-dashboards'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/dashboards/(?P<dashboard_id>[^\/]+)/widgets/$',
OrganizationDashboardWidgetsEndpoint.as_view(),
name='sentry-api-0-organization-dashboard-widgets',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/dashboards/(?P<dashboard_id>[^\/]+)/widgets/(?P<widget_id>[^\/]+)$',
OrganizationDashboardWidgetDetailsEndpoint.as_view(),
name='sentry-api-0-organization-dashboard-widget-details',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/health/top/$',
OrganizationHealthTopEndpoint.as_view(),
name='sentry-api-0-organization-health-top',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/health/graph/$',
OrganizationHealthGraphEndpoint.as_view(),
name='sentry-api-0-organization-health-graph',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/shortids/(?P<short_id>[^\/]+)/$',
ShortIdLookupEndpoint.as_view(),
name='sentry-api-0-short-id-lookup'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/eventids/(?P<event_id>[^\/]+)/$',
EventIdLookupEndpoint.as_view(),
name='sentry-api-0-event-id-lookup'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/slugs/$',
SlugsUpdateEndpoint.as_view(),
name='sentry-api-0-short-ids-update'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/access-requests/$',
OrganizationAccessRequestDetailsEndpoint.as_view(),
name='sentry-api-0-organization-access-requests'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/access-requests/(?P<request_id>\d+)/$',
OrganizationAccessRequestDetailsEndpoint.as_view(),
name='sentry-api-0-organization-access-request-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/activity/$',
OrganizationActivityEndpoint.as_view(),
name='sentry-api-0-organization-activity'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/api-keys/$',
OrganizationApiKeyIndexEndpoint.as_view(),
name='sentry-api-0-organization-api-key-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/api-keys/(?P<api_key_id>[^\/]+)/$',
OrganizationApiKeyDetailsEndpoint.as_view(),
name='sentry-api-0-organization-api-key-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/audit-logs/$',
OrganizationAuditLogsEndpoint.as_view(),
name='sentry-api-0-organization-audit-logs'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-provider/$',
OrganizationAuthProviderDetailsEndpoint.as_view(),
name='sentry-api-0-organization-auth-provider'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-providers/$',
OrganizationAuthProvidersEndpoint.as_view(),
name='sentry-api-0-organization-auth-providers'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/auth-provider/send-reminders/$',
OrganizationAuthProviderSendRemindersEndpoint.as_view(),
name='sentry-api-0-organization-auth-provider-send-reminders'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/avatar/$',
OrganizationAvatarEndpoint.as_view(),
name='sentry-api-0-organization-avatar'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/config/integrations/$',
OrganizationConfigIntegrationsEndpoint.as_view(),
name='sentry-api-0-organization-config-integrations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/config/repos/$',
OrganizationConfigRepositoriesEndpoint.as_view(),
name='sentry-api-0-organization-config-repositories'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/events/$',
OrganizationEventsEndpoint.as_view(),
name='sentry-api-0-organization-events'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/events-stats/$',
OrganizationEventsStatsEndpoint.as_view(),
name='sentry-api-0-organization-events-stats'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/events-heatmap/$',
OrganizationEventsHeatmapEndpoint.as_view(),
name='sentry-api-0-organization-events-heatmap'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/events-meta/$',
OrganizationEventsMetaEndpoint.as_view(),
name='sentry-api-0-organization-events-meta'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/issues/new/$',
OrganizationIssuesNewEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/issues/$',
OrganizationGroupIndexEndpoint.as_view(),
name='sentry-api-0-organization-group-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/integrations/$',
OrganizationIntegrationsEndpoint.as_view(),
name='sentry-api-0-organization-integrations'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/integrations/(?P<integration_id>[^\/]+)/$',
OrganizationIntegrationDetailsEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/integrations/(?P<integration_id>[^\/]+)/repos/$',
OrganizationIntegrationReposEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/$',
OrganizationMemberIndexEndpoint.as_view(),
name='sentry-api-0-organization-member-index'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/monitors/$',
OrganizationMonitorsEndpoint.as_view(),
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/pinned-searches/$',
OrganizationPinnedSearchEndpoint.as_view(),
name='sentry-api-0-organization-pinned-searches'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/recent-searches/$',
OrganizationRecentSearchesEndpoint.as_view(),
name='sentry-api-0-organization-recent-searches'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/searches/(?P<search_id>[^\/]+)/$',
OrganizationSearchDetailsEndpoint.as_view(),
name='sentry-api-0-organization-search-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/searches/$',
OrganizationSearchesEndpoint.as_view(),
name='sentry-api-0-organization-searches'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/issues/$',
OrganizationUserIssuesSearchEndpoint.as_view(),
name='sentry-api-0-organization-issue-search'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/(?P<user_id>[^\/]+)/issues/$',
OrganizationUserIssuesEndpoint.as_view(),
name='sentry-api-0-organization-user-issues'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/resolved/$',
OrganizationIssuesResolvedInReleaseEndpoint.as_view(),
name='sentry-api-0-organization-release-resolved'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/$',
OrganizationMemberDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/unreleased-commits/$',
OrganizationMemberUnreleasedCommitsEndpoint.as_view(),
name='sentry-api-0-organization-member-unreleased-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/assigned/$',
OrganizationMemberIssuesAssignedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-assigned'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/bookmarked/$',
OrganizationMemberIssuesBookmarkedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-bookmarked'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/issues/viewed/$',
OrganizationMemberIssuesViewedEndpoint.as_view(),
name='sentry-api-0-organization-member-issues-viewed'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/teams/(?P<team_slug>[^\/]+)/$',
OrganizationMemberTeamDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-team-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/processingissues/$',
OrganizationProcessingIssuesEndpoint.as_view(),
name='sentry-api-0-organization-processing-issues'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/projects/$',
OrganizationProjectsEndpoint.as_view(),
name='sentry-api-0-organization-projects'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/$',
OrganizationRepositoriesEndpoint.as_view(),
name='sentry-api-0-organization-repositories'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/(?P<repo_id>[^\/]+)/$',
OrganizationRepositoryDetailsEndpoint.as_view(),
name='sentry-api-0-organization-repository-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/repos/(?P<repo_id>[^\/]+)/commits/$',
OrganizationRepositoryCommitsEndpoint.as_view(),
name='sentry-api-0-organization-repository-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/plugins/$',
OrganizationPluginsEndpoint.as_view(),
name='sentry-api-0-organization-plugins'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/$',
OrganizationReleasesEndpoint.as_view(),
name='sentry-api-0-organization-releases'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/$',
OrganizationReleaseDetailsEndpoint.as_view(),
name='sentry-api-0-organization-release-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/assemble/$',
OrganizationReleaseAssembleEndpoint.as_view(),
name='sentry-api-0-organization-release-assemble'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/$',
OrganizationReleaseFilesEndpoint.as_view(),
name='sentry-api-0-organization-release-files'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/(?P<file_id>\d+)/$',
OrganizationReleaseFileDetailsEndpoint.as_view(),
name='sentry-api-0-organization-release-file-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/commitfiles/$',
CommitFileChangeEndpoint.as_view(),
name='sentry-api-0-release-commitfilechange'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/deploys/$',
ReleaseDeploysEndpoint.as_view(),
name='sentry-api-0-organization-release-deploys'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/releases/(?P<version>[^/]+)/commits/$',
OrganizationReleaseCommitsEndpoint.as_view(),
name='sentry-api-0-organization-release-commits'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/user-feedback/$',
OrganizationUserReportsEndpoint.as_view(),
name='sentry-api-0-organization-user-feedback'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/$',
OrganizationUsersEndpoint.as_view(),
name='sentry-api-0-organization-users'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/users/(?P<user_id>[^\/]+)/$',
OrganizationUserDetailsEndpoint.as_view(),
name='sentry-api-0-organization-user-details'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/sentry-app-installations/$',
SentryAppInstallationsEndpoint.as_view(),
name='sentry-api-0-sentry-app-installations'
),
url(
r'^sentry-app-installations/(?P<uuid>[^\/]+)/$',
SentryAppInstallationDetailsEndpoint.as_view(),
name='sentry-api-0-sentry-app-installation-details'
),
url(
r'^sentry-app-installations/(?P<uuid>[^\/]+)/external-requests/$',
SentryAppInstallationExternalRequestsEndpoint.as_view(),
name='sentry-api-0-sentry-app-installation-external-requests'
),
url(
r'^sentry-app-installations/(?P<uuid>[^\/]+)/external-issues/$',
SentryAppInstallationExternalIssuesEndpoint.as_view(),
name='sentry-api-0-sentry-app-installation-external-issues'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/sentry-apps/$',
OrganizationSentryAppsEndpoint.as_view(),
name='sentry-api-0-organization-sentry-apps'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/stats/$',
OrganizationStatsEndpoint.as_view(),
name='sentry-api-0-organization-stats'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/teams/$',
OrganizationTeamsEndpoint.as_view(),
name='sentry-api-0-organization-teams'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/tags/$',
OrganizationTagsEndpoint.as_view(),
name='sentry-api-0-organization-tags'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/tags/(?P<key>[^/]+)/values/$',
OrganizationTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-organization-tagkey-values'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/onboarding-tasks/$',
OrganizationOnboardingTaskEndpoint.as_view(),
name='sentry-api-0-organization-onboardingtasks'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/environments/$',
OrganizationEnvironmentsEndpoint.as_view(),
name='sentry-api-0-organization-environments',
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/broadcasts/$',
BroadcastIndexEndpoint.as_view(),
name='sentry-api-0-organization-broadcasts'
),
# Teams
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/$',
TeamDetailsEndpoint.as_view(),
name='sentry-api-0-team-details'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/(?:issues|groups)/new/$',
TeamGroupsNewEndpoint.as_view(),
name='sentry-api-0-team-groups-new'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/(?:issues|groups)/trending/$',
TeamGroupsTrendingEndpoint.as_view(),
name='sentry-api-0-team-groups-trending'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/members/$',
TeamMembersEndpoint.as_view(),
name='sentry-api-0-team-members'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/projects/$',
TeamProjectsEndpoint.as_view(),
name='sentry-api-0-team-project-index'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/stats/$',
TeamStatsEndpoint.as_view(),
name='sentry-api-0-team-stats'
),
url(
r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/avatar/$',
TeamAvatarEndpoint.as_view(),
name='sentry-api-0-team-avatar'
),
# Projects
url(r'^projects/$', ProjectIndexEndpoint.as_view(),
name='sentry-api-0-projects'),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/$',
ProjectDetailsEndpoint.as_view(),
name='sentry-api-0-project-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/avatar/$',
ProjectAvatarEndpoint.as_view(),
name='sentry-api-0-project-avatar'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/create-sample/$',
ProjectCreateSampleEndpoint.as_view(),
name='sentry-api-0-project-create-sample'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/docs/(?P<platform>[\w-]+)/$',
ProjectDocsPlatformEndpoint.as_view(),
name='sentry-api-0-project-docs-platform'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/environments/$',
ProjectEnvironmentsEndpoint.as_view(),
name='sentry-api-0-project-environments'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/environments/(?P<environment>[^/]+)/$',
ProjectEnvironmentDetailsEndpoint.as_view(),
name='sentry-api-0-project-environment-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/platforms/$',
ProjectPlatformsEndpoint.as_view(),
name='sentry-api-0-project-platform-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/$',
ProjectEventsEndpoint.as_view(),
name='sentry-api-0-project-events'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>(?:\d+|[A-Fa-f0-9]{32}))/$',
ProjectEventDetailsEndpoint.as_view(),
name='sentry-api-0-project-event-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/grouping-info/$',
EventGroupingInfoEndpoint.as_view(),
name='sentry-api-0-event-grouping-info'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/apple-crash-report$',
EventAppleCrashReportEndpoint.as_view(),
name='sentry-api-0-event-apple-crash-report'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/attachments/$',
EventAttachmentsEndpoint.as_view(),
name='sentry-api-0-event-attachments'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/attachments/(?P<attachment_id>[\w-]+)/$',
EventAttachmentDetailsEndpoint.as_view(),
name='sentry-api-0-event-attachment-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/committers/$',
EventFileCommittersEndpoint.as_view(),
name='sentry-api-0-event-file-committers'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/json/$',
EventJsonEndpoint.as_view(),
name='sentry-api-0-event-json'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/events/(?P<event_id>[\w-]+)/owners/$',
EventOwnersEndpoint.as_view(),
name='sentry-api-0-event-owners'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/$',
DebugFilesEndpoint.as_view(),
name='sentry-api-0-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/difs/assemble/$',
DifAssembleEndpoint.as_view(),
name='sentry-api-0-assemble-dif-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/unknown/$',
UnknownDebugFilesEndpoint.as_view(),
name='sentry-api-0-unknown-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/files/dsyms/associate/$',
AssociateDSymFilesEndpoint.as_view(),
name='sentry-api-0-associate-dsym-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/filters/$',
ProjectFiltersEndpoint.as_view(),
name='sentry-api-0-project-filters'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/filters/(?P<filter_id>[\w-]+)/$',
ProjectFilterDetailsEndpoint.as_view(),
        name='sentry-api-0-project-filter-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/$',
ProjectServiceHooksEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/(?P<hook_id>[^\/]+)/$',
ProjectServiceHookDetailsEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/hooks/(?P<hook_id>[^\/]+)/stats/$',
ProjectServiceHookStatsEndpoint.as_view(),
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:issues|groups)/$',
ProjectGroupIndexEndpoint.as_view(),
name='sentry-api-0-project-group-index'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:issues|groups)/stats/$',
ProjectGroupStatsEndpoint.as_view(),
name='sentry-api-0-project-group-stats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/$',
ProjectKeysEndpoint.as_view(),
name='sentry-api-0-project-keys'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/(?P<key_id>[^\/]+)/$',
ProjectKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-key-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/(?P<key_id>[^\/]+)/stats/$',
ProjectKeyStatsEndpoint.as_view()
),
url(
r'^projects/(?P<organization_slug>[^/]+)/(?P<project_slug>[^/]+)/members/$',
ProjectMemberIndexEndpoint.as_view(),
name='sentry-api-0-project-member-index'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/$',
ProjectReleasesEndpoint.as_view(),
name='sentry-api-0-project-releases'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/token/$',
ProjectReleasesTokenEndpoint.as_view(),
name='sentry-api-0-project-releases-token'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/completion/$',
ProjectReleaseSetupCompletionEndpoint.as_view(),
name='sentry-api-0-project-releases-completion-status'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/$',
ProjectReleaseDetailsEndpoint.as_view(),
name='sentry-api-0-project-release-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/commits/$',
ProjectReleaseCommitsEndpoint.as_view(),
name='sentry-api-0-project-release-commits'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/resolved/$',
ProjectIssuesResolvedInReleaseEndpoint.as_view(),
name='sentry-api-0-project-release-resolved'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/$',
ProjectReleaseFilesEndpoint.as_view(),
name='sentry-api-0-project-release-files'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/(?P<file_id>\d+)/$',
ProjectReleaseFileDetailsEndpoint.as_view(),
name='sentry-api-0-project-release-file-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/$',
ProjectRulesEndpoint.as_view(),
name='sentry-api-0-project-rules'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/configuration/$',
ProjectRulesConfigurationEndpoint.as_view(),
name='sentry-api-0-project-rules-configuration'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/rules/(?P<rule_id>[^\/]+)/$',
ProjectRuleDetailsEndpoint.as_view(),
name='sentry-api-0-project-rule-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/searches/$',
ProjectSearchesEndpoint.as_view(),
name='sentry-api-0-project-searches'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/searches/(?P<search_id>[^\/]+)/$',
ProjectSearchDetailsEndpoint.as_view(),
name='sentry-api-0-project-search-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/stats/$',
ProjectStatsEndpoint.as_view(),
name='sentry-api-0-project-stats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/$',
ProjectTagsEndpoint.as_view(),
name='sentry-api-0-project-tags'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/$',
ProjectTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-tagkey-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/values/$',
ProjectTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-project-tagkey-values'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/teams/$',
ProjectTeamsEndpoint.as_view(),
name='sentry-api-0-project-teams'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/teams/(?P<team_slug>[^\/]+)/$',
ProjectTeamDetailsEndpoint.as_view(),
name='sentry-api-0-project-team-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/transfer/$',
ProjectTransferEndpoint.as_view(),
name='sentry-api-0-project-transfer'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/users/$',
ProjectUsersEndpoint.as_view(),
name='sentry-api-0-project-users'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/users/(?P<user_hash>[^/]+)/$',
ProjectUserDetailsEndpoint.as_view(),
name='sentry-api-0-project-user-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/(?:user-feedback|user-reports)/$',
ProjectUserReportsEndpoint.as_view(),
name='sentry-api-0-project-user-reports'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/user-stats/$',
ProjectUserStatsEndpoint.as_view(),
name='sentry-api-0-project-userstats'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/$',
ProjectProcessingIssuesEndpoint.as_view(),
name='sentry-api-0-project-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/fix$',
ProjectProcessingIssuesFixEndpoint.as_view(),
name='sentry-api-0-project-fix-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/reprocessing/$',
ProjectReprocessingEndpoint.as_view(),
name='sentry-api-0-project-reprocessing'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/processingissues/discard/$',
ProjectProcessingIssuesDiscardEndpoint.as_view(),
name='sentry-api-0-project-discard-processing-issues'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/ownership/$',
ProjectOwnershipEndpoint.as_view(),
name='sentry-api-0-project-ownership'
),
# Load plugin project urls
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins/$',
ProjectPluginsEndpoint.as_view(),
name='sentry-api-0-project-plugins'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins/(?P<plugin_id>[^\/]+)/$',
ProjectPluginDetailsEndpoint.as_view(),
name='sentry-api-0-project-plugin-details'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/plugins?/',
include('sentry.plugins.base.project_api_urls')
),
# Groups
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/$',
GroupDetailsEndpoint.as_view(),
name='sentry-api-0-group-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/$',
GroupEventsEndpoint.as_view(),
name='sentry-api-0-group-events'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/latest/$',
GroupEventsLatestEndpoint.as_view(),
name='sentry-api-0-group-events-latest'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/events/oldest/$',
GroupEventsOldestEndpoint.as_view(),
name='sentry-api-0-group-events-oldest'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:notes|comments)/$',
GroupNotesEndpoint.as_view(),
name='sentry-api-0-group-notes'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:notes|comments)/(?P<note_id>[^\/]+)/$',
GroupNotesDetailsEndpoint.as_view(),
name='sentry-api-0-group-notes-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/hashes/$',
GroupHashesEndpoint.as_view(),
        name='sentry-api-0-group-hashes'
),
url(
r'^issues/(?P<issue_id>\d+)/participants/$',
GroupParticipantsEndpoint.as_view(),
        name='sentry-api-0-group-participants'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/stats/$',
GroupStatsEndpoint.as_view(),
name='sentry-api-0-group-stats'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/$',
GroupTagsEndpoint.as_view(),
name='sentry-api-0-group-tags'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/(?P<key>[^/]+)/$',
GroupTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-group-tagkey-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/tags/(?P<key>[^/]+)/values/$',
GroupTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-group-tagkey-values'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/(?:user-feedback|user-reports)/$',
GroupUserReportsEndpoint.as_view(),
name='sentry-api-0-group-user-reports'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/similar/$',
GroupSimilarIssuesEndpoint.as_view(),
name='sentry-api-0-group-similar-issues'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/external-issues/$',
GroupExternalIssuesEndpoint.as_view(),
name='sentry-api-0-group-external-issues'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/external-issues/(?P<external_issue_id>\d+)/$',
GroupExternalIssueDetailsEndpoint.as_view(),
name='sentry-api-0-group-external-issue-details'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/integrations/$',
GroupIntegrationsEndpoint.as_view(),
name='sentry-api-0-group-integrations'
),
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/integrations/(?P<integration_id>\d+)/$',
GroupIntegrationDetailsEndpoint.as_view(),
name='sentry-api-0-group-integration-details'
),
# Load plugin group urls
url(
r'^(?:issues|groups)/(?P<issue_id>\d+)/plugins?/',
include('sentry.plugins.base.group_api_urls')
),
url(
r'^shared/(?:issues|groups)/(?P<share_id>[^\/]+)/$',
SharedGroupDetailsEndpoint.as_view(),
name='sentry-api-0-shared-group-details'
),
# Tombstone
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tombstones/$',
GroupTombstoneEndpoint.as_view(),
name='sentry-api-0-group-tombstones'
),
url(
r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tombstones/(?P<tombstone_id>\d+)/$',
GroupTombstoneDetailsEndpoint.as_view(),
name='sentry-api-0-group-tombstone-details'
),
# Events
url(
r'^events/(?P<event_id>\d+)/$',
EventDetailsEndpoint.as_view(),
name='sentry-api-0-event-details'
),
# Sentry Apps
url(
r'^sentry-apps/$',
SentryAppsEndpoint.as_view(),
name='sentry-api-0-sentry-apps'
),
url(
r'^sentry-apps-stats/$',
SentryAppsStatsEndpoint.as_view(),
name='sentry-api-0-sentry-apps-stats'
),
url(
r'^sentry-apps/(?P<sentry_app_slug>[^\/]+)/$',
SentryAppDetailsEndpoint.as_view(),
name='sentry-api-0-sentry-app-details'
),
url(
r'^sentry-apps/(?P<sentry_app_slug>[^\/]+)/features/$',
SentryAppFeaturesEndpoint.as_view(),
name='sentry-api-0-sentry-app-features'
),
url(
r'^sentry-apps/(?P<sentry_app_slug>[^\/]+)/components/$',
SentryAppComponentsEndpoint.as_view(),
name='sentry-api-0-sentry-app-components'
),
url(
r'^organizations/(?P<organization_slug>[^\/]+)/sentry-app-components/$',
OrganizationSentryAppComponentsEndpoint.as_view(),
name='sentry-api-0-org-sentry-app-components'
),
url(
r'^sentry-app-installations/(?P<uuid>[^\/]+)/authorizations/$',
SentryAppAuthorizationsEndpoint.as_view(),
name='sentry-api-0-sentry-app-authorizations'
),
# Grouping configs
url(
r'^grouping-configs/$', GroupingConfigsEndpoint.as_view(),
name='sentry-api-0-grouping-configs'
),
url(
r'^grouping-enhancements/$', GroupingEnhancementsEndpoint.as_view(),
name='sentry-api-0-grouping-enhancements'
),
# Symbolicator Builtin Sources
url(
r'^builtin-symbol-sources/$', BuiltinSymbolSourcesEndpoint.as_view(),
name='sentry-api-0-builtin-symbol-sources',
),
# Internal
url(r'^internal/health/$', SystemHealthEndpoint.as_view(),
name='sentry-api-0-system-health'),
url(
r'^internal/options/$', SystemOptionsEndpoint.as_view(), name='sentry-api-0-system-options'
),
url(r'^internal/quotas/$', InternalQuotasEndpoint.as_view()),
url(r'^internal/queue/tasks/$', InternalQueueTasksEndpoint.as_view()),
url(r'^internal/stats/$', InternalStatsEndpoint.as_view(),
name='sentry-api-0-internal-stats'),
# Project Wizard
url(
r'^wizard/$',
SetupWizard.as_view(),
name='sentry-api-0-project-wizard-new'
),
url(
r'^wizard/(?P<wizard_hash>[^\/]+)/$',
SetupWizard.as_view(),
name='sentry-api-0-project-wizard'
),
# Catch all
url(r'^$', IndexEndpoint.as_view(), name='sentry-api-index'),
url(r'^', CatchallEndpoint.as_view(), name='sentry-api-catchall'),
# url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
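# A minimal sketch of resolving one of the named routes above with Django's reverse();
# 'my-org' is a hypothetical slug, and the returned prefix depends on where this urlconf
# is included (e.g. under /api/0/ it would yield '/api/0/organizations/my-org/').
from django.core.urlresolvers import reverse

org_details_path = reverse(
    'sentry-api-0-organization-details',
    kwargs={'organization_slug': 'my-org'},
)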
|
py
|
1a559fa823ec02d17b17bb512dc0a2d66ac7fa0a
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
import math
from typing import Callable, Dict, Iterator, List, Tuple, Union
import oneflow as flow
from oneflow.nn.optimizer.optimizer import Optimizer, ParamGroup
from oneflow.nn.parameter import Parameter
class AdamW(Optimizer):
"""Implements AdamW algorithm.
The original Adam algorithm was proposed in `Adam: A Method for Stochastic Optimization`_.
The AdamW variant was proposed in `Decoupled Weight Decay Regularization`_.
    The AdamW variant decouples the weight decay term from the adaptive gradient update
    (more details: `Adam-weight-decay <https://www.fast.ai/2018/07/02/adam-weight-decay/>`_),
    which avoids the unwanted interaction between L2 regularization and adaptive learning rates.
    The parameter update equations are:
.. math::
& V_t = \\beta_1*V_{t-1} + (1-\\beta_1)*grad
& S_t = \\beta_2*S_{t-1} + (1-\\beta_2)*{grad} \\odot {grad}
& \\hat{g} = learning\\_rate*(\\frac{{V_t}}{\\sqrt{{S_t}}+\\epsilon}+\\lambda*param_{old})
& param_{new} = param_{old} - \\hat{g}
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
        weight_decay (float, optional): decoupled weight decay coefficient (denoted λ in the equations above; default: 0)
amsgrad (bool, optional): whether to use the AMSGrad variant of this algorithm. (default: False)
do_bias_correction (bool, optional): Whether do bias correction (default: True)
.. _Adam\\: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _Decoupled Weight Decay Regularization:
https://arxiv.org/abs/1711.05101
For example:
Example 1:
.. code-block:: python
# Assume net is a custom model.
adamw = flow.optim.AdamW(net.parameters(), lr=1e-3)
for epoch in range(epochs):
# Read data, Compute the loss and so on.
# ...
loss.backward()
adamw.step()
adamw.zero_grad()
Example 2:
.. code-block:: python
# Assume net is a custom model.
adamw = flow.optim.AdamW(
[
{
"params": net.parameters(),
"lr": learning_rate,
"clip_grad_max_norm": 0.5,
"clip_grad_norm_type": 2.0,
}
],
)
for epoch in range(epochs):
# Read data, Compute the loss and so on.
# ...
loss.backward()
adamw.clip_grad()
adamw.step()
adamw.zero_grad()
    If you want to use clip_grad, you can refer to this example.
For more details of `clip_grad_max_norm` and `clip_grad_norm_type`, you can refer to :func:`oneflow.nn.utils.clip_grad_norm_`.
"""
def __init__(
self,
params: Union[Iterator[Parameter], List[Dict]],
lr: float = 0.001,
betas: Tuple[float, float] = (0.9, 0.999),
eps: float = 1e-08,
weight_decay: float = 0,
amsgrad: bool = False,
do_bias_correction: bool = True,
):
assert lr >= 0.0, f"Invalid learning rate: {lr}"
assert eps >= 0.0, f"Invalid epsilon value: {eps}"
assert (
betas[0] >= 0.0 and betas[0] < 1.0
), f"Invalid beta parameter at index 0: {betas[0]}"
assert (
betas[1] >= 0.0 and betas[1] < 1.0
), f"Invalid beta parameter at index 1: {betas[1]}"
assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
options = dict()
options["lr"] = lr
options["eps"] = eps
options["betas"] = betas
options["weight_decay"] = weight_decay
options["bias_correction1"] = 1.0
options["bias_correction2"] = 1.0
options["do_bias_correction"] = do_bias_correction
options["amsgrad"] = amsgrad
super().__init__(params, options)
for param_group in self.param_groups:
for param in param_group.parameters:
assert param.is_leaf, "parameters must be leaf tensor"
self._state[param] = dict()
self._op_with_amsgrad = (
flow.stateful_op("adam_update")
.Input("model")
.Input("model_diff")
.Input("m")
.Input("v")
.Input("max_v")
.Build()
)
self._op_without_amsgrad = (
flow.stateful_op("adam_update")
.Input("model")
.Input("model_diff")
.Input("m")
.Input("v")
.Build()
)
def step(self, closure: Callable = None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
with flow.no_grad():
loss = None
if closure is not None:
loss = closure()
for param_group in self.param_groups:
if param_group["do_bias_correction"]:
param_group["bias_correction1"] = 1.0 - math.pow(
param_group["betas"][0], self._state["step"] + 1
)
param_group["bias_correction2"] = 1.0 - math.pow(
param_group["betas"][1], self._state["step"] + 1
)
kwargs = {
"learning_rate": param_group["lr"],
"bias_correction1": param_group["bias_correction1"],
"bias_correction2": param_group["bias_correction2"],
"weight_decay": param_group["weight_decay"],
"beta1": param_group["betas"][0],
"beta2": param_group["betas"][1],
"epsilon": param_group["eps"],
"do_bias_correction": param_group["do_bias_correction"],
"amsgrad": param_group["amsgrad"],
}
for param in param_group.parameters:
if param.grad is None:
continue
if "exp_avg" not in self._state[param]:
self._state[param]["exp_avg"] = flow.zeros_like(param)
if "exp_avg_sq" not in self._state[param]:
self._state[param]["exp_avg_sq"] = flow.zeros_like(param)
if param_group["amsgrad"]:
if "max_exp_avg_sq" not in self._state[param]:
self._state[param]["max_exp_avg_sq"] = flow.zeros_like(
param
)
m_tensor = self._state[param]["exp_avg"]
v_tensor = self._state[param]["exp_avg_sq"]
if param_group["amsgrad"]:
max_v_tensor = self._state[param]["max_exp_avg_sq"]
flow._C.dispatch_adam_update(
self._op_with_amsgrad,
(param, param.grad, m_tensor, v_tensor, max_v_tensor),
**kwargs,
)
else:
flow._C.dispatch_adam_update(
self._op_without_amsgrad,
(param, param.grad, m_tensor, v_tensor),
**kwargs,
)
self._state["step"] += 1
return loss
def _generate_conf_for_graph(self, train_conf, vars_conf):
new_opt_confs = []
for param_group in self.param_groups:
optimizer_conf = train_conf.mutable_optimizer_conf().Add()
lr = (
param_group["initial_lr"]
if "initial_lr" in param_group
else param_group["lr"]
)
weight_decay = param_group["weight_decay"]
beta1 = param_group["betas"][0]
beta2 = param_group["betas"][1]
epsilon = param_group["eps"]
do_bias_correction = param_group["do_bias_correction"]
amsgrad = param_group["amsgrad"]
optimizer_conf.set_base_learning_rate(lr)
optimizer_conf.mutable_adam_conf().set_beta1(beta1)
optimizer_conf.mutable_adam_conf().set_beta2(beta2)
optimizer_conf.mutable_adam_conf().set_epsilon(epsilon)
optimizer_conf.mutable_adam_conf().set_do_bias_correction(
do_bias_correction
)
optimizer_conf.mutable_adam_conf().set_amsgrad(amsgrad)
optimizer_conf.mutable_weight_decay_conf().set_weight_decay_rate(
weight_decay
)
self._generate_grad_clip_conf_for_optim_conf(param_group, optimizer_conf)
for param in param_group.parameters:
if param.requires_grad:
optimizer_conf.add_variable_op_names(vars_conf[param].name)
new_opt_confs.append(optimizer_conf)
return new_opt_confs
@property
def support_sparse(self):
"""Whether AdamW Optimizer support sparse update.
"""
return True
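# A minimal NumPy sketch of a single update following the equations in the class
# docstring above (bias correction and AMSGrad omitted, matching the equations as
# written); this is an illustration with hypothetical hyperparameters, not OneFlow code.
import numpy as np

def adamw_update_sketch(param, grad, v, s, lr=1e-3, betas=(0.9, 0.999),
                        eps=1e-8, weight_decay=0.01):
    """Return (new_param, V_t, S_t) for one decoupled-weight-decay Adam step."""
    beta1, beta2 = betas
    v = beta1 * v + (1 - beta1) * grad            # V_t: first moment
    s = beta2 * s + (1 - beta2) * grad * grad     # S_t: second moment
    g_hat = lr * (v / (np.sqrt(s) + eps) + weight_decay * param)
    return param - g_hat, v, s

# usage: p, v, s = adamw_update_sketch(p, g, np.zeros_like(p), np.zeros_like(p))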
|
py
|
1a559fc47bb3e60212398594d32b2a5728ee97b8
|
import sys
import re
import numpy as np
import pandas as pd
from scipy.stats import pearsonr
from sklearn.preprocessing import scale
import pytest
from nibabel import Nifti1Image
from nilearn.input_data import NiftiMasker
from nilearn.interfaces.fmriprep import load_confounds
from nilearn.interfaces.fmriprep.load_confounds import _check_strategy
from nilearn._utils.fmriprep_confounds import _to_camel_case
from nilearn.interfaces.fmriprep.tests.utils import (
create_tmp_filepath, get_leagal_confound
)
def _simu_img(tmp_path, demean):
"""Simulate an nifti image based on confound file with some parts confounds
and some parts noise."""
file_nii, _ = create_tmp_filepath(tmp_path, copy_confounds=True)
# set the size of the image matrix
nx = 5
ny = 5
# the actual number of slices will actually be double of that
# as we will stack slices with confounds on top of slices with noise
nz = 2
    # Load a simple 6-parameter motion model as confounds.
    # demean is set to False so the signal is simulated from the original,
    # unmodified confound values
confounds, _ = load_confounds(
file_nii, strategy=("motion", ), motion="basic", demean=False
)
X = _handle_non_steady(confounds)
X = X.values
# the number of time points is based on the example confound file
nt = X.shape[0]
# initialize an empty 4D volume
vol = np.zeros([nx, ny, 2 * nz, nt])
vol_conf = np.zeros([nx, ny, 2 * nz])
vol_rand = np.zeros([nx, ny, 2 * nz])
# create random noise and a random mixture of confounds standardized
# to zero mean and unit variance
if sys.version_info < (3, 7): # fall back to random state for 3.6
np.random.RandomState(42)
beta = np.random.rand(nx * ny * nz, X.shape[1])
tseries_rand = scale(np.random.rand(nx * ny * nz, nt), axis=1)
else:
        random_state = np.random.default_rng(0)
        beta = random_state.random((nx * ny * nz, X.shape[1]))
        tseries_rand = scale(random_state.random((nx * ny * nz, nt)), axis=1)
# create the confound mixture
tseries_conf = scale(np.matmul(beta, X.transpose()), axis=1)
# fill the first half of the 4D data with the random mixture
vol[:, :, 0:nz, :] = tseries_conf.reshape(nx, ny, nz, nt)
vol_conf[:, :, 0:nz] = 1
# create random noise in the second half of the 4D data
vol[:, :, range(nz, 2 * nz), :] = tseries_rand.reshape(nx, ny, nz, nt)
vol_rand[:, :, range(nz, 2 * nz)] = 1
# Shift the mean to non-zero
vol = vol + 10
# create an nifti image with the data, and corresponding mask
img = Nifti1Image(vol, np.eye(4))
mask_conf = Nifti1Image(vol_conf, np.eye(4))
mask_rand = Nifti1Image(vol_rand, np.eye(4))
# generate the associated confounds for testing
test_confounds, _ = load_confounds(
file_nii, strategy=("motion",), motion="basic", demean=demean)
# match how we extend the length to increase the degree of freedom
test_confounds = _handle_non_steady(test_confounds)
sample_mask = np.arange(test_confounds.shape[0])[1:]
return img, mask_conf, mask_rand, test_confounds, sample_mask
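# A small standalone sketch of the premise behind _simu_img (illustrative only, with
# made-up shapes): a voxel built as a linear mixture of confounds loses essentially
# all of its variance once those confounds are regressed out.
def _sketch_confound_regression():
    import numpy as np
    rng = np.random.default_rng(0)
    X = rng.standard_normal((100, 6))       # 6 "confound" regressors, 100 time points
    y = X @ rng.standard_normal(6)          # a pure confound-mixture "voxel"
    beta, *_ = np.linalg.lstsq(X, y, rcond=None)
    residual = y - X @ beta
    assert residual.std() < 1e-8            # essentially no variance left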
def _handle_non_steady(confounds):
"""Simulate non steady state correctly while increase the length."""
X = confounds.values
# the first row is non-steady state, replace it with the input from the
# second row
non_steady = X[0, :]
X[0, :] = X[1, :]
# repeat X in length (axis = 0) 10 times to increase
# the degree of freedom for numerical stability
X = np.tile(X, (10, 1))
# put non-steady state volume back at the first sample
X[0, :] = non_steady
X = pd.DataFrame(X, columns=confounds.columns)
return X
def _regression(confounds, tmp_path):
"""Simple regression with NiftiMasker."""
# Simulate data
img, mask_conf, _, _, _ = _simu_img(tmp_path, demean=False)
confounds = _handle_non_steady(confounds)
# Do the regression
masker = NiftiMasker(mask_img=mask_conf, standardize=True)
tseries_clean = masker.fit_transform(
img, confounds=confounds, sample_mask=None
)
assert tseries_clean.shape[0] == confounds.shape[0]
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize(
"test_strategy,param",
[
(("motion", ), {}),
(("high_pass", ), {}),
(("wm_csf", ), {"wm_csf": "full"}),
(("global_signal", ), {"global_signal": "full"}),
(("high_pass", "compcor", ), {}),
(("high_pass", "compcor", ), {"compcor": "anat_separated"}),
(("high_pass", "compcor", ), {"compcor": "temporal"}),
(("ica_aroma", ), {"ica_aroma": "basic"}),
],
)
def test_nilearn_regress(tmp_path, test_strategy, param):
"""Try regressing out all motion types without sample mask."""
img_nii, _ = create_tmp_filepath(
tmp_path, copy_confounds=True, copy_json=True
)
confounds, _ = load_confounds(img_nii, strategy=test_strategy, **param)
_regression(confounds, tmp_path)
def _tseries_std(img, mask_img, confounds, sample_mask,
standardize_signal=False, standardize_confounds=True,
detrend=False):
"""Get the std of time series in a mask."""
masker = NiftiMasker(
mask_img=mask_img,
standardize=standardize_signal,
standardize_confounds=standardize_confounds,
detrend=detrend
)
tseries = masker.fit_transform(img,
confounds=confounds,
sample_mask=sample_mask)
return tseries.std(axis=0)
def _denoise(img, mask_img, confounds, sample_mask,
standardize_signal=False, standardize_confounds=True,
detrend=False):
"""Extract time series with and without confounds."""
masker = NiftiMasker(mask_img=mask_img,
standardize=standardize_signal,
standardize_confounds=standardize_confounds,
detrend=detrend)
tseries_raw = masker.fit_transform(img, sample_mask=sample_mask)
tseries_clean = masker.fit_transform(
img, confounds=confounds, sample_mask=sample_mask
)
return tseries_raw, tseries_clean
def _corr_tseries(tseries1, tseries2):
"""Compute the correlation between two sets of time series."""
corr = np.zeros(tseries1.shape[1])
for ind in range(tseries1.shape[1]):
corr[ind], _ = pearsonr(tseries1[:, ind], tseries2[:, ind])
return corr
@pytest.mark.filterwarnings("ignore")
def test_nilearn_standardize_false(tmp_path):
"""Test removing confounds with no standardization."""
# niftimasker default:
# standardize=False, standardize_confounds=True, detrend=False
# Simulate data; set demean to False as standardize_confounds=True
(img, mask_conf, mask_rand,
confounds, sample_mask) = _simu_img(tmp_path, demean=False)
# Check that most variance is removed
# in voxels composed of pure confounds
tseries_std = _tseries_std(img, mask_conf, confounds, sample_mask,
standardize_signal=False,
standardize_confounds=True,
detrend=False)
assert np.mean(tseries_std < 0.0001)
# Check that most variance is preserved
# in voxels composed of random noise
tseries_std = _tseries_std(img, mask_rand, confounds, sample_mask,
standardize_signal=False,
standardize_confounds=True,
detrend=False)
assert np.mean(tseries_std > 0.9)
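# Illustrative aside (not from nilearn): the two assertions above rely on np.mean() of a
# boolean array being the fraction of True entries, and a bare `assert` on that float
# passing whenever the fraction is non-zero.
def _sketch_boolean_mean_assertion():
    import numpy as np
    values = np.array([5e-05, 0.5, 2e-05])
    frac = np.mean(values < 0.0001)   # 2 of 3 entries below the threshold -> 0.666...
    assert frac                       # truthy because the fraction is non-zero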
@pytest.mark.filterwarnings("ignore")
@pytest.mark.parametrize("standardize_signal", ["zscore", "psc"])
@pytest.mark.parametrize("standardize_confounds,detrend", [(True, False),
(False, True),
(True, True)])
def test_nilearn_standardize(tmp_path, standardize_signal,
standardize_confounds, detrend):
"""Test confounds removal with logical parameters for processing signal."""
# demean is set to False to let signal.clean handle everything
(img, mask_conf, mask_rand, confounds, mask) = _simu_img(tmp_path,
demean=False)
# We now load the time series with vs without confounds
# in voxels composed of pure confounds
# the correlation before and after denoising should be very low
# as most of the variance is removed by denoising
tseries_raw, tseries_clean = _denoise(
img, mask_conf, confounds, mask,
standardize_signal=standardize_signal,
standardize_confounds=standardize_confounds,
detrend=detrend)
corr = _corr_tseries(tseries_raw, tseries_clean)
assert np.absolute(np.mean(corr)) < 0.2
    # We now load the time series with vs without confounds
    # in voxels where the signal is uncorrelated with the confounds.
    # The correlation before and after denoising should be very high,
    # as very little of the variance is removed by denoising
tseries_raw, tseries_clean = _denoise(
img, mask_rand, confounds, mask,
standardize_signal=standardize_signal,
standardize_confounds=standardize_confounds,
detrend=detrend)
corr = _corr_tseries(tseries_raw, tseries_clean)
assert corr.mean() > 0.8
def test_confounds2df(tmp_path):
"""Check auto-detect of confonds from an fMRI nii image."""
img_nii, _ = create_tmp_filepath(tmp_path, copy_confounds=True)
confounds, _ = load_confounds(img_nii)
assert "trans_x" in confounds.columns
@pytest.mark.parametrize("strategy,message",
[(["string", ], "not a supported type of confounds."),
("error", "tuple or list of strings"),
((0, ), "not a supported type of confounds."),
(("compcor", ), "high_pass")])
def test_check_strategy(strategy, message):
"""Check that flawed strategy options generate meaningful error
messages."""
with pytest.raises(ValueError) as exc_info:
_check_strategy(strategy=strategy)
assert message in exc_info.value.args[0]
SUFFIXES = np.array(["", "_derivative1", "_power2", "_derivative1_power2"])
@pytest.fixture
def expected_suffixes(motion):
expectation = {
"basic": slice(1),
"derivatives": slice(2),
"power2": np.array([True, False, True, False]),
"full": slice(4),
}
return SUFFIXES[expectation[motion]]
@pytest.mark.parametrize("motion", ["basic", "derivatives", "power2", "full"])
@pytest.mark.parametrize(
"param", ["trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z"]
)
def test_motion(tmp_path, motion, param, expected_suffixes):
img_nii, _ = create_tmp_filepath(tmp_path, copy_confounds=True)
conf, _ = load_confounds(
img_nii, strategy=("motion", ), motion=motion
)
for suff in SUFFIXES:
if suff in expected_suffixes:
assert f"{param}{suff}" in conf.columns
else:
assert f"{param}{suff}" not in conf.columns
@pytest.mark.parametrize("compcor,n_compcor,test_keyword,test_n",
[("anat_combined", 2, "a_comp_cor_", 2),
("anat_combined", "all", "a_comp_cor_", 57),
("temporal", "all", "t_comp_cor_", 6)])
def test_n_compcor(tmp_path, compcor, n_compcor, test_keyword, test_n):
img_nii, _ = create_tmp_filepath(
tmp_path, copy_confounds=True, copy_json=True
)
conf, _ = load_confounds(
img_nii, strategy=("high_pass", "compcor", ), compcor=compcor,
n_compcor=n_compcor
)
assert sum(True for col in conf.columns if test_keyword in col) == test_n
def test_not_found_exception(tmp_path):
"""Check various file or parameter missing scenario."""
# Create invalid confound file in temporary dir
img_missing_confounds, bad_conf = create_tmp_filepath(
tmp_path, copy_confounds=True, copy_json=False
)
missing_params = ["trans_y", "trans_x_derivative1", "rot_z_power2"]
missing_keywords = ["cosine"]
leagal_confounds = pd.read_csv(bad_conf, delimiter="\t", encoding="utf-8")
cosine = [
col_name
for col_name in leagal_confounds.columns
if "cosine" in col_name
]
aroma = [
col_name
for col_name in leagal_confounds.columns
if "aroma" in col_name
]
missing_confounds = leagal_confounds.drop(
columns=missing_params + cosine + aroma
)
missing_confounds.to_csv(bad_conf, sep="\t", index=False)
with pytest.raises(ValueError) as exc_info:
load_confounds(
img_missing_confounds,
strategy=("high_pass", "motion", "global_signal", ),
global_signal="full",
motion="full",
)
assert f"{missing_params}" in exc_info.value.args[0]
assert f"{missing_keywords}" in exc_info.value.args[0]
# loading anat compcor should also raise an error, because the json file is
# missing for that example dataset
with pytest.raises(ValueError):
load_confounds(
img_missing_confounds,
strategy=("high_pass", "compcor"),
compcor="anat_combined",
)
# catch invalid compcor option
with pytest.raises(KeyError):
load_confounds(
img_missing_confounds, strategy=("high_pass", "compcor"),
compcor="blah"
)
# Aggressive ICA-AROMA strategy requires
# default nifti and noise ICs in confound file
# correct nifti but missing noise regressor
with pytest.raises(ValueError) as exc_info:
load_confounds(
img_missing_confounds, strategy=("ica_aroma", ), ica_aroma="basic"
)
assert "aroma" in exc_info.value.args[0]
# Aggressive ICA-AROMA strategy requires
# default nifti
aroma_nii, _ = create_tmp_filepath(
tmp_path, image_type="ica_aroma", suffix="aroma"
)
with pytest.raises(ValueError) as exc_info:
load_confounds(
aroma_nii, strategy=("ica_aroma", ), ica_aroma="basic"
)
assert "Invalid file type" in exc_info.value.args[0]
# non aggressive ICA-AROMA strategy requires
# desc-smoothAROMAnonaggr nifti file
with pytest.raises(ValueError) as exc_info:
load_confounds(
img_missing_confounds, strategy=("ica_aroma", ), ica_aroma="full"
)
assert "desc-smoothAROMAnonaggr_bold" in exc_info.value.args[0]
# no confound files along the image file
(tmp_path / bad_conf).unlink()
with pytest.raises(ValueError) as exc_info:
load_confounds(img_missing_confounds)
assert "Could not find associated confound file." in exc_info.value.args[0]
def test_non_steady_state(tmp_path):
"""Warn when 'non_steady_state' is in strategy."""
# supplying 'non_steady_state' in strategy is not necessary
# check warning is correctly raised
img, conf = create_tmp_filepath(
tmp_path, copy_confounds=True
)
warning_message = (r"Non-steady state")
with pytest.warns(UserWarning, match=warning_message):
load_confounds(img, strategy=('non_steady_state', 'motion'))
def test_load_non_nifti(tmp_path):
"""Test non-nifti and invalid file type as input."""
# tsv file - unsupported input
_, tsv = create_tmp_filepath(tmp_path, copy_confounds=True, copy_json=True)
with pytest.raises(ValueError):
load_confounds(str(tsv))
# cifti file should be supported
cifti, _ = create_tmp_filepath(
tmp_path, image_type="cifti", copy_confounds=True, copy_json=True
)
conf, _ = load_confounds(cifti)
assert conf.size != 0
# gifti support
gifti, _ = create_tmp_filepath(
tmp_path, image_type="gifti", copy_confounds=True, copy_json=True
)
conf, _ = load_confounds(gifti)
assert conf.size != 0
def test_invalid_filetype(tmp_path):
"""Invalid file types/associated files for load method."""
bad_nii, bad_conf = create_tmp_filepath(tmp_path, copy_confounds=True)
conf, _ = load_confounds(bad_nii)
# more than one legal filename for confounds
add_conf = "test_desc-confounds_timeseries.tsv"
leagal_confounds, _ = get_leagal_confound()
leagal_confounds.to_csv(tmp_path / add_conf, sep="\t", index=False)
with pytest.raises(ValueError) as info:
load_confounds(bad_nii)
assert "more than one" in str(info.value)
(tmp_path / add_conf).unlink() # Remove for the rest of the tests to run
# invalid fmriprep version: confound file with no header (<1.0)
fake_confounds = np.random.rand(30, 20)
np.savetxt(bad_conf, fake_confounds, delimiter="\t")
with pytest.raises(ValueError) as error_log:
load_confounds(bad_nii)
assert "The confound file contains no header." in str(error_log.value)
# invalid fmriprep version: old camel case header (<1.2)
leagal_confounds, _ = get_leagal_confound()
camel_confounds = leagal_confounds.copy()
camel_confounds.columns = [
_to_camel_case(col_name) for col_name in leagal_confounds.columns
]
camel_confounds.to_csv(bad_conf, sep="\t", index=False)
with pytest.raises(ValueError) as error_log:
load_confounds(bad_nii)
assert "contains header in camel case." in str(error_log.value)
    # create an empty nifti file with no associated confound file
# We only need the path to check this
no_conf = "no_confound_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz"
no_confound = tmp_path / no_conf
no_confound.touch()
with pytest.raises(ValueError):
load_confounds(bad_nii)
def test_ica_aroma(tmp_path):
"""Test ICA AROMA related file input."""
aroma_nii, _ = create_tmp_filepath(
tmp_path, image_type="ica_aroma", copy_confounds=True
)
regular_nii, _ = create_tmp_filepath(
tmp_path, image_type="regular", copy_confounds=True
)
# Aggressive strategy
conf, _ = load_confounds(
regular_nii, strategy=("ica_aroma", ), ica_aroma="basic"
)
for col_name in conf.columns:
# only aroma and non-steady state columns will be present
assert re.match("(?:aroma_motion_+|non_steady_state+)", col_name)
# Non-aggressive strategy
conf, _ = load_confounds(
aroma_nii, strategy=("ica_aroma", ), ica_aroma="full"
)
assert conf.size == 0
# invalid combination of strategy and option
with pytest.raises(ValueError) as exc_info:
conf, _ = load_confounds(
regular_nii, strategy=("ica_aroma", ), ica_aroma="invalid"
)
assert "Current input: invalid" in exc_info.value.args[0]
def test_sample_mask(tmp_path):
"""Test load method and sample mask."""
regular_nii, regular_conf = create_tmp_filepath(
tmp_path, image_type="regular", copy_confounds=True
)
reg, mask = load_confounds(
regular_nii, strategy=("motion", "scrub"), scrub=5, fd_threshold=0.15
)
# the current test data has 6 time points marked as motion outliers,
# and one nonsteady state (overlap with the first motion outlier)
    # 2 time points removed due to the "full" scrubbing strategy (remove segment
# shorter than 5 volumes)
assert reg.shape[0] - len(mask) == 8
# nilearn requires unmasked confound regressors
assert reg.shape[0] == 30
# non steady state will always be removed
reg, mask = load_confounds(regular_nii, strategy=("motion", ))
assert reg.shape[0] - len(mask) == 1
# When no non-steady state volumes are present
conf_data, _ = get_leagal_confound(non_steady_state=False)
conf_data.to_csv(regular_conf, sep="\t", index=False) # save to tmp
reg, mask = load_confounds(regular_nii, strategy=("motion", ))
assert mask is None
    # When no volumes need removing (very liberal motion threshold)
reg, mask = load_confounds(
regular_nii, strategy=("motion", "scrub"), scrub=0, fd_threshold=4
)
assert mask is None
@pytest.mark.parametrize(
"image_type", ["regular", "ica_aroma", "gifti", "cifti"]
)
def test_inputs(tmp_path, image_type):
"""Test multiple images as input."""
# generate files
files = []
for i in range(2): # gifti edge case
nii, _ = create_tmp_filepath(
tmp_path,
suffix=f"img{i+1}",
image_type=image_type,
copy_confounds=True,
copy_json=True,
)
files.append(nii)
if image_type == "ica_aroma":
conf, _ = load_confounds(files, strategy=("ica_aroma", ))
else:
conf, _ = load_confounds(files)
assert len(conf) == 2
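# --- Hedged usage sketch (not part of the test suite) ---
# The (confounds, sample_mask) pair returned by load_confounds is meant to be
# fed to a nilearn masker; the masker class and argument names below are
# assumptions based on recent nilearn releases, not taken from this file:
#
#   from nilearn.maskers import NiftiMasker
#   conf, mask = load_confounds(img_nii, strategy=("motion", "high_pass"))
#   masker = NiftiMasker(standardize=True)
#   cleaned = masker.fit_transform(img_nii, confounds=conf, sample_mask=mask)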
|
py
|
1a55a08019c44e162fe054c9ed697fb9eb0b1dc5
|
# Copyright (c) 2018, NVIDIA CORPORATION.
from __future__ import print_function, division
import inspect
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from itertools import product
import cudf
from cudf import queryutils
from cudf.dataframe import DataFrame
_params_query_parser = []
_params_query_parser.append(('a > @b', ('a', '__CUDF_ENVREF__b')))
_params_query_parser.append(('(a + b) <= @c', ('a', 'b', '__CUDF_ENVREF__c')))
_params_query_parser.append(('a > b if a > 0 else b > a', ('a', 'b')))
@pytest.mark.parametrize('text,expect_args', _params_query_parser)
def test_query_parser(text, expect_args):
info = queryutils.query_parser(text)
fn = queryutils.query_builder(info, 'myfoo')
assert callable(fn)
argspec = inspect.getfullargspec(fn)
assert tuple(argspec.args) == tuple(expect_args)
params_query_data = list(product([1, 2, 7, 8, 9, 16, 100, 129], range(2)))
params_query_fn = [
(lambda a, b: a < b, 'a < b'),
(lambda a, b: a * 2 >= b, 'a * 2 >= b'),
(lambda a, b: 2 * (a + b) > (a + b) / 2, '2 * (a + b) > (a + b) / 2'),
]
@pytest.mark.parametrize('data,fn',
product(params_query_data, params_query_fn))
def test_query(data, fn):
# prepare
nelem, seed = data
expect_fn, query_expr = fn
np.random.seed(seed)
df = DataFrame()
df['a'] = aa = np.arange(nelem)
df['b'] = bb = np.random.random(nelem) * nelem
# udt
expect_mask = expect_fn(aa, bb)
df2 = df.query(query_expr)
# check
assert len(df2) == np.count_nonzero(expect_mask)
np.testing.assert_array_almost_equal(df2['a'].to_array(), aa[expect_mask])
np.testing.assert_array_almost_equal(df2['b'].to_array(), bb[expect_mask])
params_query_env_fn = [
(lambda a, b, c, d: a * c > b + d,
'a * @c > b + @d'),
(lambda a, b, c, d: ((a / c) < d) | ((b ** c) > d),
'((a / @c) < @d) | ((b ** @c) > @d)')
]
@pytest.mark.parametrize('data,fn',
product(params_query_data, params_query_env_fn))
def test_query_ref_env(data, fn):
# prepare
nelem, seed = data
expect_fn, query_expr = fn
np.random.seed(seed)
df = DataFrame()
df['a'] = aa = np.arange(nelem)
df['b'] = bb = np.random.random(nelem) * nelem
c = 2.3
d = 1.2
# udt
expect_mask = expect_fn(aa, bb, c, d)
print(expect_mask)
df2 = df.query(query_expr)
# check
assert len(df2) == np.count_nonzero(expect_mask)
np.testing.assert_array_almost_equal(df2['a'].to_array(), aa[expect_mask])
np.testing.assert_array_almost_equal(df2['b'].to_array(), bb[expect_mask])
def test_query_env_changing():
df = DataFrame()
df['a'] = aa = np.arange(100)
expr = 'a < @c'
# first attempt
c = 10
got = df.query(expr)
np.testing.assert_array_equal(aa[aa < c], got['a'].to_array())
# change env
c = 50
got = df.query(expr)
np.testing.assert_array_equal(aa[aa < c], got['a'].to_array())
def test_query_splitted_combine():
np.random.seed(0)
df = pd.DataFrame({'x': np.random.randint(0, 5, size=10),
'y': np.random.normal(size=10)})
gdf = DataFrame.from_pandas(df)
# Split the GDF
s1 = gdf[:5]
s2 = gdf[5:]
# Do the query
expr = 'x > 2'
q1 = s1.query(expr)
q2 = s2.query(expr)
# Combine
got = cudf.concat([q1, q2]).to_pandas()
# Should equal to just querying the original GDF
expect = gdf.query(expr).to_pandas()
assert_frame_equal(got, expect)
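# --- Hedged usage sketch ---
# As the parser tests above show, '@name' in a query string references a
# variable from the calling environment (rewritten internally to
# '__CUDF_ENVREF__name'). A minimal sketch using the same DataFrame API:
#
#   df = DataFrame()
#   df['a'] = np.arange(10)
#   threshold = 5
#   subset = df.query('a >= @threshold')  # keeps rows where a >= 5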
|
py
|
1a55a0fccc5c95f93b2dee0e06b78515a82b48de
|
#!/usr/bin/env python
#
# Copyright 2020 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
#
# Produce messages to Confluent Cloud
# Using Confluent Python Client for Apache Kafka
#
# =============================================================================
from confluent_kafka import Producer, KafkaError
import json
import ccloud_lib
if __name__ == '__main__':
# Read arguments and configurations and initialize
args = ccloud_lib.parse_args()
config_file = args.config_file
topic = args.topic
conf = ccloud_lib.read_ccloud_config(config_file)
# Create Producer instance
producer = Producer({
'bootstrap.servers': conf['bootstrap.servers'],
'sasl.mechanisms': conf['sasl.mechanisms'],
'security.protocol': conf['security.protocol'],
'sasl.username': conf['sasl.username'],
'sasl.password': conf['sasl.password'],
})
# Create topic if needed
ccloud_lib.create_topic(conf, topic)
delivered_records = 0
# Optional per-message on_delivery handler (triggered by poll() or flush())
# when a message has been successfully delivered or
# permanently failed delivery (after retries).
    def acked(err, msg):
        """Delivery report handler called on
        successful or failed delivery of a message.
        """
        global delivered_records
if err is not None:
print("Failed to deliver message: {}".format(err))
else:
delivered_records += 1
print("Produced record to topic {} partition [{}] @ offset {}"
.format(msg.topic(), msg.partition(), msg.offset()))
for n in range(10):
record_key = "alice"
record_value = json.dumps({'count': n})
print("Producing record: {}\t{}".format(record_key, record_value))
producer.produce(topic, key=record_key, value=record_value, on_delivery=acked)
        # producer.poll() serves delivery reports (on_delivery)
# from previous produce() calls.
producer.poll(0)
producer.flush()
print("{} messages were produced to topic {}!".format(delivered_records, topic))
|
py
|
1a55a139ef81501a106b0a1d180117cac359935b
|
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from datasets import *
from models import *
import torch.nn as nn
import torch.nn.functional as F
import torch
os.makedirs("images", exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")
parser.add_argument("--batch_size", type=int, default=8, help="size of the batches")
parser.add_argument("--dataset_name", type=str, default="img_align_celeba", help="name of the dataset")
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")
parser.add_argument("--img_size", type=int, default=128, help="size of each image dimension")
parser.add_argument("--mask_size", type=int, default=32, help="size of random mask")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=500, help="interval between image sampling")
opt = parser.parse_args()
print(opt)
cuda = True if torch.cuda.is_available() else False
input_shape = (opt.channels, opt.img_size, opt.img_size)
# Loss function
adversarial_loss = torch.nn.MSELoss()
# Initialize generator and discriminator
generator = Generator(input_shape)
discriminator = Discriminator(input_shape)
if cuda:
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Dataset loader
transforms_ = [
transforms.Resize((opt.img_size, opt.img_size), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
transforms_lr = [
transforms.Resize((opt.img_size // 4, opt.img_size // 4), Image.BICUBIC),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
dataloader = DataLoader(
ImageDataset("../../data/%s" % opt.dataset_name, transforms_x=transforms_, transforms_lr=transforms_lr),
batch_size=opt.batch_size,
shuffle=True,
num_workers=opt.n_cpu,
)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def apply_random_mask(imgs):
idx = np.random.randint(0, opt.img_size - opt.mask_size, (imgs.shape[0], 2))
masked_imgs = imgs.clone()
for i, (y1, x1) in enumerate(idx):
y2, x2 = y1 + opt.mask_size, x1 + opt.mask_size
masked_imgs[i, :, y1:y2, x1:x2] = -1
return masked_imgs
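# Note (added): with the Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) transform
# above, pixel values lie in [-1, 1], so filling the masked square with -1
# paints it black in normalized space. A quick sanity-check sketch:
#
#   batch = next(iter(dataloader))["x"]   # shape (B, 3, img_size, img_size)
#   masked = apply_random_mask(batch)
#   assert masked.min().item() >= -1.0 and masked.shape == batch.shape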
def save_sample(saved_samples):
# Generate inpainted image
gen_imgs = generator(saved_samples["masked"], saved_samples["lowres"])
# Save sample
sample = torch.cat((saved_samples["masked"].data, gen_imgs.data, saved_samples["imgs"].data), -2)
save_image(sample, "images/%d.png" % batches_done, nrow=5, normalize=True)
saved_samples = {}
for epoch in range(opt.n_epochs):
for i, batch in enumerate(dataloader):
imgs = batch["x"]
imgs_lr = batch["x_lr"]
masked_imgs = apply_random_mask(imgs)
# Adversarial ground truths
valid = Variable(Tensor(imgs.shape[0], *discriminator.output_shape).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(imgs.shape[0], *discriminator.output_shape).fill_(0.0), requires_grad=False)
if cuda:
imgs = imgs.type(Tensor)
imgs_lr = imgs_lr.type(Tensor)
masked_imgs = masked_imgs.type(Tensor)
real_imgs = Variable(imgs)
imgs_lr = Variable(imgs_lr)
masked_imgs = Variable(masked_imgs)
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Generate a batch of images
gen_imgs = generator(masked_imgs, imgs_lr)
# Loss measures generator's ability to fool the discriminator
g_loss = adversarial_loss(discriminator(gen_imgs), valid)
g_loss.backward()
optimizer_G.step()
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Measure discriminator's ability to classify real from generated samples
real_loss = adversarial_loss(discriminator(real_imgs), valid)
fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
d_loss = 0.5 * (real_loss + fake_loss)
d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())
)
# Save first ten samples
if not saved_samples:
saved_samples["imgs"] = real_imgs[:1].clone()
saved_samples["masked"] = masked_imgs[:1].clone()
saved_samples["lowres"] = imgs_lr[:1].clone()
elif saved_samples["imgs"].size(0) < 10:
saved_samples["imgs"] = torch.cat((saved_samples["imgs"], real_imgs[:1]), 0)
saved_samples["masked"] = torch.cat((saved_samples["masked"], masked_imgs[:1]), 0)
saved_samples["lowres"] = torch.cat((saved_samples["lowres"], imgs_lr[:1]), 0)
batches_done = epoch * len(dataloader) + i
if batches_done % opt.sample_interval == 0:
save_sample(saved_samples)
|
py
|
1a55a189e26e20a331f3abb4db3bed6b2669ec32
|
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 22 14:23:32 2018
The csv module handles parsing more robustly than naive string splitting, since the delimiter can also appear inside the data itself.
@author: dongrp2
"""
import csv
with open('names.csv','r') as names_csv:
csv_reader = csv.reader(names_csv)
    next(csv_reader)  # skip the first line, which is the header
for line in csv_reader:
#print(line) #print all the lines in list
print(line[2]) # 2nd index of the list
"""Creating a tab delimited file"""
with open('names.csv','r') as names_csv:
csv_reader = csv.reader(names_csv)
with open('new_names.csv','w') as new_names:
csv_writer = csv.writer(new_names, delimiter='\t')
for line in csv_reader:
csv_writer.writerow(line)
# with open('new_names.csv','r') as read_new_csv:
# csv_new_reader = csv.reader(read_new_csv,delimiter='\t')
# for line in csv_new_reader:
# print(line)
"""Using Dictionary Reader where field names are keys of the values"""
with open('names.csv','r') as names_csv:
csv_reader = csv.DictReader(names_csv)
for line in csv_reader:
        print(line['email'])  # accessing the key 'email'
"""Using Dictionary Writer have to provide the field names"""
with open('names.csv','r') as names_csv:
csv_reader = csv.DictReader(names_csv)
with open('new_names_dict.csv','w') as new_names_dict:
fieldnames = ['first_name','last_name','email']
csv_writer = csv.DictWriter(new_names_dict, fieldnames=fieldnames, delimiter='\t')
csv_writer.writeheader()
for line in csv_reader:
            # del line['email'] if you want to write only the first and last name
csv_writer.writerow(line)
#for line in csv_reader:
# print(line['email'])  # accessing the key 'email'
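# A hedged follow-on sketch: instead of deleting keys by hand, DictWriter can
# drop extra columns itself via extrasaction='ignore'. The output filename
# 'first_last_only.csv' is made up for illustration:
#
# with open('names.csv', 'r') as names_csv:
#     csv_reader = csv.DictReader(names_csv)
#     with open('first_last_only.csv', 'w') as out_file:
#         csv_writer = csv.DictWriter(out_file, fieldnames=['first_name', 'last_name'],
#                                     extrasaction='ignore', delimiter='\t')
#         csv_writer.writeheader()
#         for line in csv_reader:
#             csv_writer.writerow(line)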
|
py
|
1a55a2f230de02eaa710f63af5a521766c430758
|
from typing import List, cast
from avtocod.methods.base import AvtocodMethod, AvtocodType, Request
from avtocod.types.profile.profile import Balance, BalanceItem
class GetBalance(AvtocodMethod[List[BalanceItem]]):
"""Get the current avtocod repair balance"""
__returning__ = Balance
def build_request(self) -> Request:
return Request(method="profile.balance")
@classmethod
def on_response_parse(cls, response: AvtocodType) -> List[BalanceItem]:
return cast(List[BalanceItem], response.balance)
|
py
|
1a55a3d82cbe7b230399083345ef0d8edd2a5607
|
# -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.slk import Slovak
class SlovakTestCase(HangulizeTestCase):
lang = Slovak()
def test_people(self):
self.assert_examples({
'Ján Bahýľ': '얀 바힐',
'Štefan Banič': '슈테판 바니치',
'Anton Bernolák': '안톤 베르놀라크',
'Peter Bondra': '페테르 본드라',
'Zdeno Chára': '즈데노 하라',
'Dominika Cibulková': '도미니카 치불코바',
'Ján Čarnogurský': '얀 차르노구르스키',
'Štefan Marko Daxner': '슈테판 마르코 닥스네르',
'Pavol Demitra': '파볼 데미트라',
'Alexander Dubček': '알렉산데르 둡체크',
'Mikuláš Dzurinda': '미쿨라시 주린다',
'Marián Gáborík': '마리안 가보리크',
'Marek Hamšík': '마레크 함시크',
'Daniela Hantuchová': '다니엘라 한투호바',
'Andrej Hlinka': '안드레이 흘린카',
'Milan Hodža': '밀란 호자',
'Marian Hossa': '마리안 호사',
'Dominik Hrbatý': '도미니크 흐르바티',
'Pavol Hurajt': '파볼 후라이트',
'Jozef Miloslav Hurban': '요제프 밀로슬라우 후르반',
'Gustáv Husák': '구스타우 후사크',
'Hviezdoslav': '흐비에즈도슬라우',
'Dionýz Ilkovič': '디오니스 일코비치',
'Elena Kaliská': '엘레나 칼리스카',
'Michaela Kocianová': '미하엘라 코치아노바',
'Karol Kučera': '카롤 쿠체라',
'Anastasiya Kuzmina': '아나스타시야 쿠즈미나',
'Michal Martikán': '미할 마르티칸',
'Janko Matúška': '얀코 마투슈카',
'Vladimír Mečiar': '블라디미르 메치아르',
'Martina Moravcová': '마르티나 모라우초바',
'Jozef Murgaš': '요제프 무르가시',
'Natália Prekopová': '나탈리아 프레코포바',
'Jozef Roháček': '요제프 로하체크',
'Magdaléna Rybáriková': '마그달레나 리바리코바',
'Zuzana Sekerová': '주자나 세케로바',
'Aurel Stodola': '아우렐 스토돌라',
'Eugen Suchoň': '에우겐 수혼',
'Martin Škrtel': '마르틴 슈크르텔',
'Milan Rastislav Štefánik': '밀란 라스티슬라우 슈테파니크',
'Zuzana Štefečeková': '주자나 슈테페체코바',
'Peter Šťastný': '페테르 슈탸스트니',
'Ľudovít Štúr': '류도비트 슈투르',
'Jozef Tiso': '요제프 티소',
'Vavrinec': '바우리네츠',
'Rudolf Vrba': '루돌프 브르바',
'Vladimír Weiss': '블라디미르 베이스',
})
def test_places(self):
self.assert_examples({
'Banská Bystrica': '반스카 비스트리차',
'Bardejov': '바르데요우',
'Bratislava': '브라티슬라바',
'Komárno': '코마르노',
'Košice': '코시체',
'Manínska tiesňava': '마닌스카 티에스냐바',
'Martin': '마르틴',
'Michalovce': '미할로우체',
'Nitra': '니트라',
'Poprad': '포프라트',
'Považská': '포바슈스카',
'Prešov': '프레쇼우',
'Rožňava': '로주냐바',
'Slavín': '슬라빈',
'Spiš': '스피시',
'Trenčín': '트렌친',
'Trnava': '트르나바',
'Váh': '바흐',
'Vlkolínec': '블콜리네츠',
'Vydrica': '비드리차',
'Zvolen': '즈볼렌',
'Žilina': '질리나',
'Žehra': '제흐라',
})
def test_miscellaneous(self):
self.assert_examples({
'deväť': '데베티',
'jahôd': '야후오트',
'mäkčeň': '멕첸',
'pätnásť': '페트나스티',
})
|
py
|
1a55a4950b13ac7573ae51b4ea7b1b0b134f1f6f
|
import urllib
import itertools
import json
import jinja2
from datasette.plugins import pm
from datasette.database import QueryInterrupted
from datasette.utils import (
CustomRow,
MultiParams,
append_querystring,
compound_keys_after_sql,
escape_sqlite,
filters_should_redirect,
is_url,
path_from_row_pks,
path_with_added_args,
path_with_removed_args,
path_with_replaced_args,
sqlite3,
to_css_class,
urlsafe_components,
value_as_boolean,
)
from datasette.utils.asgi import NotFound
from datasette.filters import Filters
from .base import DataView, DatasetteError, ureg
from .database import QueryView
LINK_WITH_LABEL = (
'<a href="{base_url}{database}/{table}/{link_id}">{label}</a> <em>{id}</em>'
)
LINK_WITH_VALUE = '<a href="{base_url}{database}/{table}/{link_id}">{id}</a>'
class Row:
def __init__(self, cells):
self.cells = cells
def __iter__(self):
return iter(self.cells)
def __getitem__(self, key):
for cell in self.cells:
if cell["column"] == key:
return cell["raw"]
raise KeyError
def display(self, key):
for cell in self.cells:
if cell["column"] == key:
return cell["value"]
return None
def __str__(self):
d = {
key: self[key]
for key in [
c["column"] for c in self.cells if not c.get("is_special_link_column")
]
}
return json.dumps(d, default=repr, indent=2)
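# Illustration (added, column names hypothetical): in a custom _table.html
# template, row["author_id"] returns the raw stored value, while
# row.display("author_id") returns the rendered cell, e.g. the expanded
# foreign-key link built by display_columns_and_rows() below.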
class RowTableShared(DataView):
async def sortable_columns_for_table(self, database, table, use_rowid):
db = self.ds.databases[database]
table_metadata = self.ds.table_metadata(database, table)
if "sortable_columns" in table_metadata:
sortable_columns = set(table_metadata["sortable_columns"])
else:
sortable_columns = set(await db.table_columns(table))
if use_rowid:
sortable_columns.add("rowid")
return sortable_columns
async def expandable_columns(self, database, table):
# Returns list of (fk_dict, label_column-or-None) pairs for that table
expandables = []
db = self.ds.databases[database]
for fk in await db.foreign_keys_for_table(table):
label_column = await db.label_column_for_table(fk["other_table"])
expandables.append((fk, label_column))
return expandables
async def display_columns_and_rows(
self, database, table, description, rows, link_column=False, truncate_cells=0
):
"Returns columns, rows for specified table - including fancy foreign key treatment"
db = self.ds.databases[database]
table_metadata = self.ds.table_metadata(database, table)
sortable_columns = await self.sortable_columns_for_table(database, table, True)
columns = [
{"name": r[0], "sortable": r[0] in sortable_columns} for r in description
]
pks = await db.primary_keys(table)
column_to_foreign_key_table = {
fk["column"]: fk["other_table"]
for fk in await db.foreign_keys_for_table(table)
}
cell_rows = []
base_url = self.ds.config("base_url")
for row in rows:
cells = []
# Unless we are a view, the first column is a link - either to the rowid
# or to the simple or compound primary key
if link_column:
is_special_link_column = len(pks) != 1
pk_path = path_from_row_pks(row, pks, not pks, False)
cells.append(
{
"column": pks[0] if len(pks) == 1 else "Link",
"value_type": "pk",
"is_special_link_column": is_special_link_column,
"raw": pk_path,
"value": jinja2.Markup(
'<a href="{base_url}{database}/{table}/{flat_pks_quoted}">{flat_pks}</a>'.format(
base_url=base_url,
database=database,
table=urllib.parse.quote_plus(table),
flat_pks=str(jinja2.escape(pk_path)),
flat_pks_quoted=path_from_row_pks(row, pks, not pks),
)
),
}
)
for value, column_dict in zip(row, columns):
column = column_dict["name"]
if link_column and len(pks) == 1 and column == pks[0]:
# If there's a simple primary key, don't repeat the value as it's
# already shown in the link column.
continue
# First let the plugins have a go
# pylint: disable=no-member
plugin_display_value = pm.hook.render_cell(
value=value,
column=column,
table=table,
database=database,
datasette=self.ds,
)
if plugin_display_value is not None:
display_value = plugin_display_value
elif isinstance(value, bytes):
display_value = jinja2.Markup(
"<Binary data: {} byte{}>".format(
len(value), "" if len(value) == 1 else "s"
)
)
elif isinstance(value, dict):
# It's an expanded foreign key - display link to other row
label = value["label"]
value = value["value"]
# The table we link to depends on the column
other_table = column_to_foreign_key_table[column]
link_template = (
LINK_WITH_LABEL if (label != value) else LINK_WITH_VALUE
)
display_value = jinja2.Markup(
link_template.format(
database=database,
base_url=base_url,
table=urllib.parse.quote_plus(other_table),
link_id=urllib.parse.quote_plus(str(value)),
id=str(jinja2.escape(value)),
label=str(jinja2.escape(label)),
)
)
elif value in ("", None):
display_value = jinja2.Markup(" ")
elif is_url(str(value).strip()):
display_value = jinja2.Markup(
'<a href="{url}">{url}</a>'.format(
url=jinja2.escape(value.strip())
)
)
elif column in table_metadata.get("units", {}) and value != "":
# Interpret units using pint
value = value * ureg(table_metadata["units"][column])
# Pint uses floating point which sometimes introduces errors in the compact
# representation, which we have to round off to avoid ugliness. In the vast
# majority of cases this rounding will be inconsequential. I hope.
value = round(value.to_compact(), 6)
display_value = jinja2.Markup(
"{:~P}".format(value).replace(" ", " ")
)
else:
display_value = str(value)
if truncate_cells and len(display_value) > truncate_cells:
display_value = display_value[:truncate_cells] + u"\u2026"
cells.append(
{
"column": column,
"value": display_value,
"raw": value,
"value_type": "none"
if value is None
else str(type(value).__name__),
}
)
cell_rows.append(Row(cells))
if link_column:
# Add the link column header.
# If it's a simple primary key, we have to remove and re-add that column name at
# the beginning of the header row.
if len(pks) == 1:
columns = [col for col in columns if col["name"] != pks[0]]
columns = [
{"name": pks[0] if len(pks) == 1 else "Link", "sortable": len(pks) == 1}
] + columns
return columns, cell_rows
class TableView(RowTableShared):
name = "table"
async def post(self, request, db_name, table_and_format):
# Handle POST to a canned query
canned_query = await self.ds.get_canned_query(
db_name, table_and_format, request.actor
)
assert canned_query, "You may only POST to a canned query"
return await QueryView(self.ds).data(
request,
db_name,
None,
canned_query["sql"],
metadata=canned_query,
editable=False,
canned_query=table_and_format,
named_parameters=canned_query.get("params"),
write=bool(canned_query.get("write")),
)
async def data(
self,
request,
database,
hash,
table,
default_labels=False,
_next=None,
_size=None,
):
canned_query = await self.ds.get_canned_query(database, table, request.actor)
if canned_query:
return await QueryView(self.ds).data(
request,
database,
hash,
canned_query["sql"],
metadata=canned_query,
editable=False,
canned_query=table,
named_parameters=canned_query.get("params"),
write=bool(canned_query.get("write")),
)
db = self.ds.databases[database]
is_view = bool(await db.get_view_definition(table))
table_exists = bool(await db.table_exists(table))
if not is_view and not table_exists:
raise NotFound("Table not found: {}".format(table))
await self.check_permission(request, "view-instance")
await self.check_permission(request, "view-database", database)
await self.check_permission(request, "view-table", (database, table))
private = not await self.ds.permission_allowed(
None, "view-table", (database, table), default=True
)
pks = await db.primary_keys(table)
table_columns = await db.table_columns(table)
select_columns = ", ".join(escape_sqlite(t) for t in table_columns)
use_rowid = not pks and not is_view
if use_rowid:
select = "rowid, {}".format(select_columns)
order_by = "rowid"
order_by_pks = "rowid"
else:
select = select_columns
order_by_pks = ", ".join([escape_sqlite(pk) for pk in pks])
order_by = order_by_pks
if is_view:
order_by = ""
# Ensure we don't drop anything with an empty value e.g. ?name__exact=
args = MultiParams(
urllib.parse.parse_qs(request.query_string, keep_blank_values=True)
)
# Special args start with _ and do not contain a __
# That's so if there is a column that starts with _
# it can still be queried using ?_col__exact=blah
special_args = {}
other_args = []
for key in args:
if key.startswith("_") and "__" not in key:
special_args[key] = args[key]
else:
for v in args.getlist(key):
other_args.append((key, v))
# Handle ?_filter_column and redirect, if present
redirect_params = filters_should_redirect(special_args)
if redirect_params:
return self.redirect(
request,
path_with_added_args(request, redirect_params),
forward_querystring=False,
)
# Spot ?_sort_by_desc and redirect to _sort_desc=(_sort)
if "_sort_by_desc" in special_args:
return self.redirect(
request,
path_with_added_args(
request,
{
"_sort_desc": special_args.get("_sort"),
"_sort_by_desc": None,
"_sort": None,
},
),
forward_querystring=False,
)
table_metadata = self.ds.table_metadata(database, table)
units = table_metadata.get("units", {})
filters = Filters(sorted(other_args), units, ureg)
where_clauses, params = filters.build_where_clauses(table)
extra_wheres_for_ui = []
# Add _where= from querystring
if "_where" in request.args:
if not await self.ds.permission_allowed(
request.actor, "execute-sql", resource=database, default=True,
):
raise DatasetteError("_where= is not allowed", status=403)
else:
where_clauses.extend(request.args.getlist("_where"))
extra_wheres_for_ui = [
{
"text": text,
"remove_url": path_with_removed_args(request, {"_where": text}),
}
for text in request.args.getlist("_where")
]
# Support for ?_through={table, column, value}
extra_human_descriptions = []
if "_through" in request.args:
for through in request.args.getlist("_through"):
through_data = json.loads(through)
through_table = through_data["table"]
other_column = through_data["column"]
value = through_data["value"]
outgoing_foreign_keys = await db.foreign_keys_for_table(through_table)
try:
fk_to_us = [
fk for fk in outgoing_foreign_keys if fk["other_table"] == table
][0]
except IndexError:
raise DatasetteError(
"Invalid _through - could not find corresponding foreign key"
)
param = "p{}".format(len(params))
where_clauses.append(
"{our_pk} in (select {our_column} from {through_table} where {other_column} = :{param})".format(
through_table=escape_sqlite(through_table),
our_pk=escape_sqlite(fk_to_us["other_column"]),
our_column=escape_sqlite(fk_to_us["column"]),
other_column=escape_sqlite(other_column),
param=param,
)
)
params[param] = value
extra_human_descriptions.append(
'{}.{} = "{}"'.format(through_table, other_column, value)
)
# _search support:
fts_table = special_args.get("_fts_table")
fts_table = fts_table or table_metadata.get("fts_table")
fts_table = fts_table or await db.fts_table(table)
fts_pk = special_args.get("_fts_pk", table_metadata.get("fts_pk", "rowid"))
search_args = dict(
pair for pair in special_args.items() if pair[0].startswith("_search")
)
search = ""
search_mode_raw = special_args.get("_searchmode") == "raw"
if fts_table and search_args:
if "_search" in search_args:
# Simple ?_search=xxx
search = search_args["_search"]
where_clauses.append(
"{fts_pk} in (select rowid from {fts_table} where {fts_table} match {match_clause})".format(
fts_table=escape_sqlite(fts_table),
fts_pk=escape_sqlite(fts_pk),
match_clause=":search"
if search_mode_raw
else "escape_fts(:search)",
)
)
extra_human_descriptions.append('search matches "{}"'.format(search))
params["search"] = search
else:
# More complex: search against specific columns
for i, (key, search_text) in enumerate(search_args.items()):
search_col = key.split("_search_", 1)[1]
if search_col not in await db.table_columns(fts_table):
raise DatasetteError("Cannot search by that column", status=400)
where_clauses.append(
"rowid in (select rowid from {fts_table} where {search_col} match {match_clause})".format(
fts_table=escape_sqlite(fts_table),
search_col=escape_sqlite(search_col),
match_clause=":search_{}".format(i)
if search_mode_raw
else "escape_fts(:search_{})".format(i),
)
)
extra_human_descriptions.append(
'search column "{}" matches "{}"'.format(
search_col, search_text
)
)
params["search_{}".format(i)] = search_text
sortable_columns = set()
sortable_columns = await self.sortable_columns_for_table(
database, table, use_rowid
)
# Allow for custom sort order
sort = special_args.get("_sort")
sort_desc = special_args.get("_sort_desc")
if not sort and not sort_desc:
sort = table_metadata.get("sort")
sort_desc = table_metadata.get("sort_desc")
if sort and sort_desc:
raise DatasetteError("Cannot use _sort and _sort_desc at the same time")
if sort:
if sort not in sortable_columns:
raise DatasetteError("Cannot sort table by {}".format(sort))
order_by = escape_sqlite(sort)
if sort_desc:
if sort_desc not in sortable_columns:
raise DatasetteError("Cannot sort table by {}".format(sort_desc))
order_by = "{} desc".format(escape_sqlite(sort_desc))
from_sql = "from {table_name} {where}".format(
table_name=escape_sqlite(table),
where=("where {} ".format(" and ".join(where_clauses)))
if where_clauses
else "",
)
# Copy of params so we can mutate them later:
from_sql_params = dict(**params)
count_sql = "select count(*) {}".format(from_sql)
_next = _next or special_args.get("_next")
offset = ""
if _next:
if is_view:
# _next is an offset
offset = " offset {}".format(int(_next))
else:
components = urlsafe_components(_next)
# If a sort order is applied, the first of these is the sort value
if sort or sort_desc:
sort_value = components[0]
# Special case for if non-urlencoded first token was $null
if _next.split(",")[0] == "$null":
sort_value = None
components = components[1:]
# Figure out the SQL for next-based-on-primary-key first
next_by_pk_clauses = []
if use_rowid:
next_by_pk_clauses.append("rowid > :p{}".format(len(params)))
params["p{}".format(len(params))] = components[0]
else:
# Apply the tie-breaker based on primary keys
if len(components) == len(pks):
param_len = len(params)
next_by_pk_clauses.append(
compound_keys_after_sql(pks, param_len)
)
for i, pk_value in enumerate(components):
params["p{}".format(param_len + i)] = pk_value
# Now add the sort SQL, which may incorporate next_by_pk_clauses
if sort or sort_desc:
if sort_value is None:
if sort_desc:
# Just items where column is null ordered by pk
where_clauses.append(
"({column} is null and {next_clauses})".format(
column=escape_sqlite(sort_desc),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} is not null or ({column} is null and {next_clauses}))".format(
column=escape_sqlite(sort),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
else:
where_clauses.append(
"({column} {op} :p{p}{extra_desc_only} or ({column} = :p{p} and {next_clauses}))".format(
column=escape_sqlite(sort or sort_desc),
op=">" if sort else "<",
p=len(params),
extra_desc_only=""
if sort
else " or {column2} is null".format(
column2=escape_sqlite(sort or sort_desc)
),
next_clauses=" and ".join(next_by_pk_clauses),
)
)
params["p{}".format(len(params))] = sort_value
order_by = "{}, {}".format(order_by, order_by_pks)
else:
where_clauses.extend(next_by_pk_clauses)
where_clause = ""
if where_clauses:
where_clause = "where {} ".format(" and ".join(where_clauses))
if order_by:
order_by = "order by {} ".format(order_by)
extra_args = {}
# Handle ?_size=500
page_size = _size or request.args.get("_size") or table_metadata.get("size")
if page_size:
if page_size == "max":
page_size = self.ds.max_returned_rows
try:
page_size = int(page_size)
if page_size < 0:
raise ValueError
except ValueError:
raise DatasetteError("_size must be a positive integer", status=400)
if page_size > self.ds.max_returned_rows:
raise DatasetteError(
"_size must be <= {}".format(self.ds.max_returned_rows), status=400
)
extra_args["page_size"] = page_size
else:
page_size = self.ds.page_size
sql_no_limit = "select {select} from {table_name} {where}{order_by}".format(
select=select,
table_name=escape_sqlite(table),
where=where_clause,
order_by=order_by,
)
sql = "{sql_no_limit} limit {limit}{offset}".format(
sql_no_limit=sql_no_limit.rstrip(), limit=page_size + 1, offset=offset
)
if request.args.get("_timelimit"):
extra_args["custom_time_limit"] = int(request.args.get("_timelimit"))
results = await db.execute(sql, params, truncate=True, **extra_args)
# Number of filtered rows in whole set:
filtered_table_rows_count = None
if (
not db.is_mutable
and self.ds.inspect_data
and count_sql == "select count(*) from {} ".format(table)
):
try:
filtered_table_rows_count = self.ds.inspect_data[database]["tables"][
table
]["count"]
except KeyError:
pass
if count_sql and filtered_table_rows_count is None:
try:
count_rows = list(await db.execute(count_sql, from_sql_params))
filtered_table_rows_count = count_rows[0][0]
except QueryInterrupted:
pass
# facets support
if not self.ds.config("allow_facet") and any(
arg.startswith("_facet") for arg in request.args
):
raise DatasetteError("_facet= is not allowed", status=400)
# pylint: disable=no-member
facet_classes = list(
itertools.chain.from_iterable(pm.hook.register_facet_classes())
)
facet_results = {}
facets_timed_out = []
facet_instances = []
for klass in facet_classes:
facet_instances.append(
klass(
self.ds,
request,
database,
sql=sql_no_limit,
params=params,
table=table,
metadata=table_metadata,
row_count=filtered_table_rows_count,
)
)
for facet in facet_instances:
(
instance_facet_results,
instance_facets_timed_out,
) = await facet.facet_results()
facet_results.update(instance_facet_results)
facets_timed_out.extend(instance_facets_timed_out)
# Figure out columns and rows for the query
columns = [r[0] for r in results.description]
rows = list(results.rows)
# Expand labeled columns if requested
expanded_columns = []
expandable_columns = await self.expandable_columns(database, table)
columns_to_expand = None
try:
all_labels = value_as_boolean(special_args.get("_labels", ""))
except ValueError:
all_labels = default_labels
# Check for explicit _label=
if "_label" in request.args:
columns_to_expand = request.args.getlist("_label")
if columns_to_expand is None and all_labels:
# expand all columns with foreign keys
columns_to_expand = [fk["column"] for fk, _ in expandable_columns]
if columns_to_expand:
expanded_labels = {}
for fk, _ in expandable_columns:
column = fk["column"]
if column not in columns_to_expand:
continue
expanded_columns.append(column)
# Gather the values
column_index = columns.index(column)
values = [row[column_index] for row in rows]
# Expand them
expanded_labels.update(
await self.ds.expand_foreign_keys(database, table, column, values)
)
if expanded_labels:
# Rewrite the rows
new_rows = []
for row in rows:
new_row = CustomRow(columns)
for column in row.keys():
value = row[column]
if (column, value) in expanded_labels and value is not None:
new_row[column] = {
"value": value,
"label": expanded_labels[(column, value)],
}
else:
new_row[column] = value
new_rows.append(new_row)
rows = new_rows
# Pagination next link
next_value = None
next_url = None
if len(rows) > page_size and page_size > 0:
if is_view:
next_value = int(_next or 0) + page_size
else:
next_value = path_from_row_pks(rows[-2], pks, use_rowid)
# If there's a sort or sort_desc, add that value as a prefix
if (sort or sort_desc) and not is_view:
prefix = rows[-2][sort or sort_desc]
if isinstance(prefix, dict) and "value" in prefix:
prefix = prefix["value"]
if prefix is None:
prefix = "$null"
else:
prefix = urllib.parse.quote_plus(str(prefix))
next_value = "{},{}".format(prefix, next_value)
added_args = {"_next": next_value}
if sort:
added_args["_sort"] = sort
else:
added_args["_sort_desc"] = sort_desc
else:
added_args = {"_next": next_value}
next_url = self.ds.absolute_url(
request, path_with_replaced_args(request, added_args)
)
rows = rows[:page_size]
# Detect suggested facets
suggested_facets = []
if (
self.ds.config("suggest_facets")
and self.ds.config("allow_facet")
and not _next
):
for facet in facet_instances:
suggested_facets.extend(await facet.suggest())
# human_description_en combines filters AND search, if provided
human_description_en = filters.human_description_en(
extra=extra_human_descriptions
)
if sort or sort_desc:
sorted_by = "sorted by {}{}".format(
(sort or sort_desc), " descending" if sort_desc else ""
)
human_description_en = " ".join(
[b for b in [human_description_en, sorted_by] if b]
)
async def extra_template():
nonlocal sort
display_columns, display_rows = await self.display_columns_and_rows(
database,
table,
results.description,
rows,
link_column=not is_view,
truncate_cells=self.ds.config("truncate_cells_html"),
)
metadata = (
(self.ds.metadata("databases") or {})
.get(database, {})
.get("tables", {})
.get(table, {})
)
self.ds.update_with_inherited_metadata(metadata)
form_hidden_args = []
for arg in ("_fts_table", "_fts_pk"):
if arg in special_args:
form_hidden_args.append((arg, special_args[arg]))
if request.args.get("_where"):
for where_text in request.args.getlist("_where"):
form_hidden_args.append(("_where", where_text))
# if no sort specified AND table has a single primary key,
# set sort to that so arrow is displayed
if not sort and not sort_desc:
if 1 == len(pks):
sort = pks[0]
elif use_rowid:
sort = "rowid"
return {
"supports_search": bool(fts_table),
"search": search or "",
"use_rowid": use_rowid,
"filters": filters,
"display_columns": display_columns,
"filter_columns": columns,
"display_rows": display_rows,
"facets_timed_out": facets_timed_out,
"sorted_facet_results": sorted(
facet_results.values(),
key=lambda f: (len(f["results"]), f["name"]),
reverse=True,
),
"extra_wheres_for_ui": extra_wheres_for_ui,
"form_hidden_args": form_hidden_args,
"is_sortable": any(c["sortable"] for c in display_columns),
"path_with_replaced_args": path_with_replaced_args,
"path_with_removed_args": path_with_removed_args,
"append_querystring": append_querystring,
"request": request,
"sort": sort,
"sort_desc": sort_desc,
"disable_sort": is_view,
"custom_table_templates": [
"_table-{}-{}.html".format(
to_css_class(database), to_css_class(table)
),
"_table-table-{}-{}.html".format(
to_css_class(database), to_css_class(table)
),
"_table.html",
],
"metadata": metadata,
"view_definition": await db.get_view_definition(table),
"table_definition": await db.get_table_definition(table),
}
return (
{
"database": database,
"table": table,
"is_view": is_view,
"human_description_en": human_description_en,
"rows": rows[:page_size],
"truncated": results.truncated,
"filtered_table_rows_count": filtered_table_rows_count,
"expanded_columns": expanded_columns,
"expandable_columns": expandable_columns,
"columns": columns,
"primary_keys": pks,
"units": units,
"query": {"sql": sql, "params": params},
"facet_results": facet_results,
"suggested_facets": suggested_facets,
"next": next_value and str(next_value) or None,
"next_url": next_url,
"private": private,
"allow_execute_sql": await self.ds.permission_allowed(
request.actor, "execute-sql", database, default=True
),
},
extra_template,
(
"table-{}-{}.html".format(to_css_class(database), to_css_class(table)),
"table.html",
),
)
class RowView(RowTableShared):
name = "row"
async def data(self, request, database, hash, table, pk_path, default_labels=False):
pk_values = urlsafe_components(pk_path)
await self.check_permission(request, "view-instance")
await self.check_permission(request, "view-database", database)
await self.check_permission(request, "view-table", (database, table))
db = self.ds.databases[database]
pks = await db.primary_keys(table)
use_rowid = not pks
select = "*"
if use_rowid:
select = "rowid, *"
pks = ["rowid"]
wheres = ['"{}"=:p{}'.format(pk, i) for i, pk in enumerate(pks)]
sql = "select {} from {} where {}".format(
select, escape_sqlite(table), " AND ".join(wheres)
)
params = {}
for i, pk_value in enumerate(pk_values):
params["p{}".format(i)] = pk_value
results = await db.execute(sql, params, truncate=True)
columns = [r[0] for r in results.description]
rows = list(results.rows)
if not rows:
raise NotFound("Record not found: {}".format(pk_values))
async def template_data():
display_columns, display_rows = await self.display_columns_and_rows(
database,
table,
results.description,
rows,
link_column=False,
truncate_cells=0,
)
for column in display_columns:
column["sortable"] = False
return {
"foreign_key_tables": await self.foreign_key_tables(
database, table, pk_values
),
"display_columns": display_columns,
"display_rows": display_rows,
"custom_table_templates": [
"_table-{}-{}.html".format(
to_css_class(database), to_css_class(table)
),
"_table-row-{}-{}.html".format(
to_css_class(database), to_css_class(table)
),
"_table.html",
],
"metadata": (self.ds.metadata("databases") or {})
.get(database, {})
.get("tables", {})
.get(table, {}),
}
data = {
"database": database,
"table": table,
"rows": rows,
"columns": columns,
"primary_keys": pks,
"primary_key_values": pk_values,
"units": self.ds.table_metadata(database, table).get("units", {}),
}
if "foreign_key_tables" in (request.args.get("_extras") or "").split(","):
data["foreign_key_tables"] = await self.foreign_key_tables(
database, table, pk_values
)
return (
data,
template_data,
(
"row-{}-{}.html".format(to_css_class(database), to_css_class(table)),
"row.html",
),
)
async def foreign_key_tables(self, database, table, pk_values):
if len(pk_values) != 1:
return []
db = self.ds.databases[database]
all_foreign_keys = await db.get_all_foreign_keys()
foreign_keys = all_foreign_keys[table]["incoming"]
if len(foreign_keys) == 0:
return []
sql = "select " + ", ".join(
[
"(select count(*) from {table} where {column}=:id)".format(
table=escape_sqlite(fk["other_table"]),
column=escape_sqlite(fk["other_column"]),
)
for fk in foreign_keys
]
)
try:
rows = list(await db.execute(sql, {"id": pk_values[0]}))
except sqlite3.OperationalError:
# Almost certainly hit the timeout
return []
foreign_table_counts = dict(
zip(
[(fk["other_table"], fk["other_column"]) for fk in foreign_keys],
list(rows[0]),
)
)
foreign_key_tables = []
for fk in foreign_keys:
count = (
foreign_table_counts.get((fk["other_table"], fk["other_column"])) or 0
)
foreign_key_tables.append({**fk, **{"count": count}})
return foreign_key_tables
|
py
|
1a55a5390cd36c477cf6357a62dcabfcdbc9c3d3
|
from datetime import datetime
import time
if __name__ == "__main__":
    # get the current time
now = datetime.now()
# convert time into timestamp
timstart = datetime.timestamp(now)
print("now is ", now,"<<<>", timstart)
now_end = datetime.now()
timend = now_end.timestamp()
print('second ===>', int(timend - timstart))
seconds = 100
minutes, seconds = divmod(seconds,60)
hours, minutes = divmod(minutes, 60)
print("%02d:%02d:%02d" % (hours, minutes, seconds))
|
py
|
1a55a7256fb362a3683a8b644a9a75371e77a0a4
|
import numpy as np
import matplotlib.pyplot as plt
Nx = 81
Nz = 81
Lx = 91.42
Lz = 100.0
xn = np.linspace(0,Lx,Nx)
Liton = -0.8*Lz + 0.02*Lz*np.cos(np.pi*xn/Lx)
Liton = Liton*1000
f = open("interfaces_creep.txt","w")
f.write("C 1.0 1.0\n")
f.write("rho -1000. 0.\n")
f.write("H 0.0E-12 0.0E-12\n")
f.write("A 0.0 0.0\n")
f.write("n 0.0 0.0\n")
f.write("Q 0.0 0.0\n")
f.write("V 0.0 0.0\n")
for i in np.arange(Nx):
f.write("%lf\n"%(Liton[i]))
f.close()
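# A hedged sketch of the same output written with a context manager, so the
# file is closed even if an exception occurs mid-write:
#
#   header = ["C 1.0 1.0", "rho -1000. 0.", "H 0.0E-12 0.0E-12",
#             "A 0.0 0.0", "n 0.0 0.0", "Q 0.0 0.0", "V 0.0 0.0"]
#   with open("interfaces_creep.txt", "w") as f:
#       f.write("\n".join(header) + "\n")
#       for value in Liton:
#           f.write("%lf\n" % value)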
|
py
|
1a55a8b015ce2b85f88ea05ea2f7d17aa6f4f07d
|
import json
import re
import time
import typing
import warnings
import inspect
import numpy as np
import zmq
from weakref import WeakSet
import threading
import copy
import sys
class DataSocket:
"""
    Wrapper for ZMQ socket that sends and receives dictionaries
"""
def __init__(self, context, port, type, debug, ip_address="127.0.0.1"):
# request reply socket
self._socket = context.socket(type)
self._debug = debug
        # store these as weakrefs so that circular refs don't prevent garbage collection
self._java_objects = WeakSet()
# try:
if type == zmq.PUSH:
if debug:
print("binding {}".format(port))
self._socket.bind("tcp://{}:{}".format(ip_address, port))
else:
if debug:
print("connecting {}".format(port))
self._socket.connect("tcp://{}:{}".format(ip_address, port))
# except Exception as e:
# print(e.__traceback__)
# raise Exception('Couldnt connect or bind to port {}'.format(port))
def _register_java_object(self, object):
self._java_objects.add(object)
def __del__(self):
# make sure all shadow objects have signaled to Java side to release references before they shut down
for java_object in self._java_objects:
java_object._close()
def _convert_np_to_python(self, d):
"""
recursively search dictionary and convert any values from numpy floats/ints to
python floats/ints so they can be json serialized
:return:
"""
if type(d) != dict:
return
for k, v in d.items():
if isinstance(v, dict):
self._convert_np_to_python(v)
elif type(v) == list:
for e in v:
self._convert_np_to_python(e)
elif np.issubdtype(type(v), np.floating):
d[k] = float(v)
elif np.issubdtype(type(v), np.integer):
d[k] = int(v)
def _make_array_identifier(self, entry):
"""
        Make a string to replace bytes data or a numpy array in the message, encoding the numpy dtype size if applicable
"""
# make up a random 32 bit int as the identifier
# TODO: change to simple counting
identifier = np.random.randint(-(2 ** 31), 2 ** 31 - 1, 1, dtype=np.int32)[0]
# '@{some_number}_{bytes_per_pixel}'
        # if it's a numpy array, include bytes per pixel; otherwise just interpret it as raw bytes
return identifier, "@" + str(int(identifier)) + "_" + str(
0 if isinstance(entry, bytes) else entry.dtype.itemsize
)
def _remove_bytes(self, bytes_data, structure):
if isinstance(structure, list):
for i, entry in enumerate(structure):
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[i] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, entry)
elif isinstance(structure, dict):
for key in structure.keys():
entry = structure[key]
if isinstance(entry, bytes) or isinstance(entry, np.ndarray):
int_id, str_id = self._make_array_identifier(entry)
structure[key] = str_id
bytes_data.append((int_id, entry))
elif isinstance(entry, list) or isinstance(entry, dict):
self._remove_bytes(bytes_data, structure[key])
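    # Illustration (added; the identifier value is made up): sending
    #     {"pixels": np.zeros(4, dtype=np.uint16)}
    # rewrites the dict in place to something like
    #     {"pixels": "@123456789_2"}          # 2 == dtype.itemsize
    # and appends (123456789, <the array>) to bytes_data, so the raw bytes are
    # shipped as separate multipart frames instead of being JSON-serialized.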
def send(self, message, timeout=0):
if message is None:
message = {}
# make sure any np types convert to python types so they can be json serialized
self._convert_np_to_python(message)
        # Send binary data in separate messages so it doesn't need to be JSON serialized
bytes_data = []
self._remove_bytes(bytes_data, message)
message_string = json.dumps(message)
if self._debug:
print("DEBUG, sending: {}".format(message))
# convert keys to byte array
key_vals = [(identifier.tobytes(), value) for identifier, value in bytes_data]
message_parts = [bytes(message_string, "iso-8859-1")] + [
item for keyval in key_vals for item in keyval
]
if timeout == 0:
self._socket.send_multipart(message_parts)
else:
start = time.time()
while 1000 * (time.time() - start) < timeout:
try:
self._socket.send_multipart(message_parts, flags=zmq.NOBLOCK)
return True
except zmq.ZMQError:
pass # ignore, keep trying
return False
def _replace_bytes(self, dict_or_list, hash, value):
"""
Replace placeholders for byte arrays in JSON message with their actual values
"""
if isinstance(dict_or_list, dict):
for key in dict_or_list:
if isinstance(dict_or_list[key], str) and "@" in dict_or_list[key]:
hash_in_message = int(
dict_or_list[key].split("@")[1], 16
) # interpret hex hash string
if hash == hash_in_message:
dict_or_list[key] = value
return
elif isinstance(dict_or_list[key], list) or isinstance(dict_or_list[key], dict):
self._replace_bytes(dict_or_list[key], hash, value)
elif isinstance(dict_or_list, list):
for i, entry in enumerate(dict_or_list):
if isinstance(entry, str) and "@" in dict_or_list[key]:
hash_in_message = int(entry.split("@")[1], 16) # interpret hex hash string
if hash == hash_in_message:
dict_or_list[i] = value
return
elif isinstance(entry, list) or isinstance(entry, dict):
self._replace_bytes(entry, hash, value)
def receive(self, timeout=0):
if timeout == 0:
reply = self._socket.recv_multipart()
else:
start = time.time()
reply = None
while 1000 * (time.time() - start) < timeout:
try:
reply = self._socket.recv_multipart(flags=zmq.NOBLOCK)
if reply is not None:
break
except zmq.ZMQError:
pass # ignore, keep trying
if reply is None:
return reply
message = json.loads(reply[0].decode("iso-8859-1"))
# replace any byte data placeholders with the byte data itself
for i in np.arange(1, len(reply), 2):
            # messages come in pairs: first is the hash, second is the byte data
identity_hash = int.from_bytes(reply[i], byteorder=sys.byteorder)
value = reply[i + 1]
self._replace_bytes(message, identity_hash, value)
if self._debug:
print("DEBUG, recieved: {}".format(message))
self._check_exception(message)
return message
def _check_exception(self, response):
if "type" in response and response["type"] == "exception":
raise Exception(response["value"])
def close(self):
self._socket.close()
class Bridge:
"""
Create an object which acts as a client to a corresponding server running within micro-manager.
This enables construction and interaction with arbitrary java objects
"""
_DEFAULT_PORT = 4827
_EXPECTED_ZMQ_SERVER_VERSION = "4.0.0"
thread_local = threading.local()
def __new__(cls, *args, **kwargs):
"""
        Only one instance of Bridge per thread
"""
if hasattr(Bridge.thread_local, "bridge"):
return Bridge.thread_local.bridge
else:
return super(Bridge, cls).__new__(cls)
def __init__(
self, port=_DEFAULT_PORT, convert_camel_case=True, debug=False, ip_address="127.0.0.1"
):
"""
Parameters
----------
port : int
The port on which the bridge operates
convert_camel_case : bool
If True, methods for Java objects that are passed across the bridge
will have their names converted from camel case to underscores. i.e. class.methodName()
becomes class.method_name()
debug : bool
If True print helpful stuff for debugging
"""
self._ip_address = ip_address
if not hasattr(self, "_context"):
Bridge._context = zmq.Context()
if hasattr(self.thread_local, "bridge"):
return
self.thread_local.bridge = self # cache a thread-local version of the bridge
self._convert_camel_case = convert_camel_case
self._debug = debug
self._master_socket = DataSocket(
self._context, port, zmq.REQ, debug=debug, ip_address=self._ip_address
)
self._master_socket.send({"command": "connect", "debug": debug})
self._class_factory = _JavaClassFactory()
reply_json = self._master_socket.receive(timeout=500)
if reply_json is None:
raise TimeoutError(
"Socket timed out after 500 milliseconds. Is Micro-Manager running and is the ZMQ server option enabled?"
)
if reply_json["type"] == "exception":
raise Exception(reply_json["message"])
if "version" not in reply_json:
reply_json["version"] = "2.0.0" # before version was added
if reply_json["version"] != self._EXPECTED_ZMQ_SERVER_VERSION:
warnings.warn(
"Version mistmatch between Java ZMQ server and Python client. "
"\nJava ZMQ server version: {}\nPython client expected version: {}"
"\n To fix, update to BOTH latest pycromanager and latest micro-manager nightly build".format(
reply_json["version"], self._EXPECTED_ZMQ_SERVER_VERSION
)
)
def get_class(self, serialized_object) -> typing.Type["JavaObjectShadow"]:
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)
def construct_java_object(self, classpath, new_socket=False, args=None):
"""
        Create a new instance of an object on the Java side. Returns a Python "Shadow" of the object, which behaves
just like the object on the Java side (i.e. same methods, fields). Methods of the object can be inferred at
runtime using iPython autocomplete
Parameters
----------
classpath : str
Full classpath of the java object
new_socket : bool
If True, will create new java object on a new port so that blocking calls will not interfere
with the bridges master port
args : list
list of arguments to the constructor, if applicable
Returns
-------
Python "Shadow" to the Java object
"""
if args is None:
args = []
# classpath_minus_class = '.'.join(classpath.split('.')[:-1])
# query the server for constructors matching this classpath
message = {"command": "get-constructors", "classpath": classpath}
self._master_socket.send(message)
constructors = self._master_socket.receive()["api"]
methods_with_name = [m for m in constructors if m["name"] == classpath]
if len(methods_with_name) == 0:
raise Exception("No valid java constructor found with classpath {}".format(classpath))
valid_method_spec, deserialize_types = _check_method_args(methods_with_name, args)
# Calling a constructor, rather than getting return from method
message = {
"command": "constructor",
"classpath": classpath,
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
"arguments": _package_arguments(valid_method_spec, args),
}
if new_socket:
message["new-port"] = True
self._master_socket.send(message)
serialized_object = self._master_socket.receive()
if new_socket:
socket = DataSocket(
self._context, serialized_object["port"], zmq.REQ, ip_address=self._ip_address
)
else:
socket = self._master_socket
return self._class_factory.create(
serialized_object, convert_camel_case=self._convert_camel_case
)(socket=socket, serialized_object=serialized_object, bridge=self)
def _connect_push(self, port):
"""
Connect a push socket on the given port
:param port:
:return:
"""
return DataSocket(
self._context, port, zmq.PUSH, debug=self._debug, ip_address=self._ip_address
)
def _connect_pull(self, port):
"""
Connect to a pull socket on the given port
:param port:
:return:
"""
return DataSocket(
self._context, port, zmq.PULL, debug=self._debug, ip_address=self._ip_address
)
def get_magellan(self):
"""
return an instance of the Micro-Magellan API
"""
return self.construct_java_object("org.micromanager.magellan.api.MagellanAPI")
def get_core(self):
"""
Connect to CMMCore and return object that has its methods
:return: Python "shadow" object for micromanager core
"""
if hasattr(self, "core"):
return getattr(self, "core")
self.core = self.construct_java_object("mmcorej.CMMCore")
return self.core
def get_studio(self):
"""
return an instance of the Studio object that provides access to micro-manager Java APIs
"""
return self.construct_java_object("org.micromanager.Studio")
class _JavaClassFactory:
"""
This class is responsible for generating subclasses of JavaObjectShadow. Each generated class is kept in a `dict`.
    If a given class has already been generated once it will be returned from the cache rather than re-generated.
"""
def __init__(self):
self.classes = {}
def create(
self, serialized_obj: dict, convert_camel_case: bool = True
) -> typing.Type["JavaObjectShadow"]:
"""Create a class (or return a class from the cache) based on the contents of `serialized_object` message."""
if serialized_obj["class"] in self.classes.keys(): # Return a cached class
return self.classes[serialized_obj["class"]]
else: # Generate a new class since it wasn't found in the cache.
_java_class: str = serialized_obj["class"]
python_class_name_translation = _java_class.replace(
".", "_"
) # Having periods in the name would be problematic.
_interfaces = serialized_obj["interfaces"]
static_attributes = {"_java_class": _java_class, "_interfaces": _interfaces}
fields = {} # Create a dict of field names with getter and setter funcs.
for field in serialized_obj["fields"]:
fields[field] = property(
fget=lambda instance, Field=field: instance._access_field(Field),
fset=lambda instance, val, Field=field: instance._set_field(Field, val),
)
methods = {} # Create a dict of methods for the class by name.
methodSpecs = serialized_obj["api"]
method_names = set([m["name"] for m in methodSpecs])
# parse method descriptions to make python stand ins
for method_name in method_names:
params, methods_with_name, method_name_modified = _parse_arg_names(
methodSpecs, method_name, convert_camel_case
)
return_type = methods_with_name[0]["return-type"]
fn = lambda instance, *args, signatures_list=tuple(
methods_with_name
): instance._translate_call(signatures_list, args)
fn.__name__ = method_name_modified
fn.__doc__ = "{}.{}: A dynamically generated Java method.".format(
_java_class, method_name_modified
)
sig = inspect.signature(fn)
params = [
inspect.Parameter("self", inspect.Parameter.POSITIONAL_ONLY)
] + params # Add `self` as the first argument.
return_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[return_type]
if return_type in _JAVA_TYPE_NAME_TO_PYTHON_TYPE
else return_type
)
fn.__signature__ = sig.replace(parameters=params, return_annotation=return_type)
methods[method_name_modified] = fn
newclass = type( # Dynamically create a class to shadow a java class.
python_class_name_translation, # Name, based on the original java name
(JavaObjectShadow,), # Inheritance
{
"__init__": lambda instance, socket, serialized_object, bridge: JavaObjectShadow.__init__(
instance, socket, serialized_object, bridge
),
**static_attributes,
**fields,
**methods,
},
)
self.classes[_java_class] = newclass
return newclass
class JavaObjectShadow:
"""
Generic class for serving as a python interface for a micromanager class using a zmq server backend
"""
_interfaces = (
None # Subclasses should fill these out. This class should never be directly instantiated.
)
_java_class = None
def __init__(self, socket, serialized_object, bridge: Bridge):
self._socket = socket
self._hash_code = serialized_object["hash-code"]
self._bridge = bridge
# register objects with bridge so it can tell Java side to release them before socket shuts down
socket._register_java_object(self)
self._closed = False
# atexit.register(self._close)
def _close(self):
if self._closed:
return
if not hasattr(self, "_hash_code"):
            return  # constructor didn't properly finish, nothing to clean up on java side
message = {"command": "destructor", "hash-code": self._hash_code}
if self._bridge._debug:
print("closing: {}".format(self))
self._socket.send(message)
reply_json = self._socket.receive()
if reply_json["type"] == "exception":
raise Exception(reply_json["value"])
self._closed = True
def __del__(self):
"""
Tell java side this object is garbage collected so it can do the same if needed
"""
self._close()
def _access_field(self, name):
"""
Return a python version of the field with a given name
:return:
"""
message = {"command": "get-field", "hash-code": self._hash_code, "name": name}
self._socket.send(message)
return self._deserialize(self._socket.receive())
def _set_field(self, name, value):
"""
        Set the field with the given name on the Java side to the given value
"""
message = {
"command": "set-field",
"hash-code": self._hash_code,
"name": name,
"value": _serialize_arg(value),
}
self._socket.send(message)
reply = self._deserialize(self._socket.receive())
def _translate_call(self, method_specs, fn_args: tuple):
"""
Translate to appropriate Java method, call it, and return converted python version of its result
Parameters
----------
args :
args[0] is list of dictionaries of possible method specifications
kwargs :
hold possible polymorphic args, or none
"""
# args that are none are placeholders to allow for polymorphism and not considered part of the spec
# fn_args = [a for a in fn_args if a is not None]
valid_method_spec, deserialize_types = _check_method_args(method_specs, fn_args)
# args are good, make call through socket, casting the correct type if needed (e.g. int to float)
message = {
"command": "run-method",
"hash-code": self._hash_code,
"name": valid_method_spec["name"],
"argument-types": valid_method_spec["arguments"],
"argument-deserialization-types": deserialize_types,
}
message["arguments"] = _package_arguments(valid_method_spec, fn_args)
self._socket.send(message)
return self._deserialize(self._socket.receive())
def _deserialize(self, json_return):
"""
        json_return :
            deserialized JSON message describing the value returned from the Java side
Returns
-------
An appropriate python type of the converted value
"""
if json_return["type"] == "exception":
raise Exception(json_return["value"])
elif json_return["type"] == "null":
return None
elif json_return["type"] == "primitive":
return json_return["value"]
elif json_return["type"] == "string":
return json_return["value"]
elif json_return["type"] == "list":
return [self._deserialize(obj) for obj in json_return["value"]]
elif json_return["type"] == "object":
if json_return["class"] == "JSONObject":
return json.loads(json_return["value"])
else:
raise Exception("Unrecognized return class")
elif json_return["type"] == "unserialized-object":
# inherit socket from parent object
return self._bridge.get_class(json_return)(
socket=self._socket, serialized_object=json_return, bridge=self._bridge
)
else:
return deserialize_array(json_return)
def deserialize_array(json_return):
"""
Convert a serialized java array to the appropriate numpy type
Parameters
----------
json_return
"""
if json_return["type"] in ["byte-array", "int-array", "short-array", "float-array"]:
decoded = json_return["value"]
if json_return["type"] == "byte-array":
return np.frombuffer(decoded, dtype="=u1").copy()
elif json_return["type"] == "double-array":
return np.frombuffer(decoded, dtype="=f8").copy()
elif json_return["type"] == "int-array":
return np.frombuffer(decoded, dtype="=u4").copy()
elif json_return["type"] == "short-array":
return np.frombuffer(decoded, dtype="=u2").copy()
elif json_return["type"] == "float-array":
return np.frombuffer(decoded, dtype="=f4").copy()
def _package_arguments(valid_method_spec, fn_args):
"""
Serialize function arguments and also include description of their Java types
Parameters
----------
valid_method_spec:
fn_args :
"""
arguments = []
for arg_type, arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
arguments.append(_serialize_arg(arg_val))
elif _JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type] is object:
arguments.append(_serialize_arg(arg_val))
elif arg_val is None:
arguments.append(_serialize_arg(arg_val))
elif isinstance(arg_val, np.ndarray):
arguments.append(_serialize_arg(arg_val))
else:
arguments.append(_serialize_arg(_JAVA_TYPE_NAME_TO_PYTHON_TYPE[arg_type](arg_val)))
return arguments
def _serialize_arg(arg):
if arg is None:
return None
if type(arg) in [bool, str, int, float]:
return arg # json handles serialization
elif type(arg) == np.ndarray:
return arg.tobytes()
elif isinstance(arg, JavaObjectShadow):
return {"hash-code": arg._hash_code}
else:
raise Exception("Unknown argumetn type")
def _check_single_method_spec(method_spec, fn_args):
"""
    Check if a single method specification is compatible with the arguments the function received
Parameters
----------
method_spec :
fn_args :
"""
if len(method_spec["arguments"]) != len(fn_args):
return False
for arg_java_type, arg_val in zip(method_spec["arguments"], fn_args):
if isinstance(arg_val, JavaObjectShadow):
if arg_java_type not in arg_val._interfaces:
# check that it shadows object of the correct type
return False
elif type(arg_val) == np.ndarray:
# For ND Arrays, need to make sure data types match
if (
arg_java_type != "java.lang.Object"
and arg_val.dtype.type != _JAVA_ARRAY_TYPE_NUMPY_DTYPE[arg_java_type]
):
return False
elif not any(
[
isinstance(arg_val, acceptable_type)
for acceptable_type in _JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE[arg_java_type]
]
) and not (
arg_val is None and arg_java_type in _JAVA_NON_PRIMITIVES
): # could be null if its an object
# if a type that gets converted
return False
return True
def _check_method_args(method_specs, fn_args):
"""
Compare python arguments to java arguments to find correct function to call
Parameters
----------
method_specs :
fn_args :
Returns
-------
one of the method_specs that is valid
"""
valid_method_spec = None
for method_spec in method_specs:
if _check_single_method_spec(method_spec, fn_args):
valid_method_spec = method_spec
break
if valid_method_spec is None:
raise Exception(
"Incorrect arguments. \nExpected {} \nGot {}".format(
" or ".join([", ".join(method_spec["arguments"]) for method_spec in method_specs]),
", ".join([str(type(a)) for a in fn_args]),
)
)
    # subclass NDArrays to the appropriate data type so they don't get incorrectly reconstructed as objects
valid_method_spec = copy.deepcopy(valid_method_spec)
deserialize_types = []
for java_arg_class, python_arg_val in zip(valid_method_spec["arguments"], fn_args):
if isinstance(python_arg_val, np.ndarray):
deserialize_types.append(
[
ja
for ja, npdt in zip(
_JAVA_ARRAY_TYPE_NUMPY_DTYPE.keys(), _JAVA_ARRAY_TYPE_NUMPY_DTYPE.values()
)
if python_arg_val.dtype.type == npdt
][0]
)
else:
deserialize_types.append(java_arg_class)
return valid_method_spec, deserialize_types
def _parse_arg_names(methods, method_name, convert_camel_case):
method_name_modified = (
_camel_case_2_snake_case(method_name) if convert_camel_case else method_name
)
# all methods with this name and different argument lists
methods_with_name = [m for m in methods if m["name"] == method_name]
min_required_args = (
0
if len(methods_with_name) == 1 and len(methods_with_name[0]["arguments"]) == 0
else min([len(m["arguments"]) for m in methods_with_name])
)
# sort with largest number of args last so lambda at end gets max num args
methods_with_name.sort(key=lambda val: len(val["arguments"]))
method = methods_with_name[-1] # We only need to evaluate the overload with the most arguments.
params = []
unique_argument_names = []
for arg_index, typ in enumerate(method["arguments"]):
hint = _CLASS_NAME_MAPPING[typ] if typ in _CLASS_NAME_MAPPING else "object"
python_type = (
_JAVA_TYPE_NAME_TO_PYTHON_TYPE[typ] if typ in _JAVA_TYPE_NAME_TO_PYTHON_TYPE else typ
)
if hint in unique_argument_names: # append numbers to end so arg hints have unique names
i = 1
while hint + str(i) in unique_argument_names:
i += 1
arg_name = hint + str(i)
else:
arg_name = hint
unique_argument_names.append(arg_name)
# this is how overloading is handled for now, by making default arguments as none, but
# it might be better to explicitly compare argument types
if arg_index >= min_required_args:
default_arg_value = None
else:
default_arg_value = inspect.Parameter.empty
params.append(
inspect.Parameter(
name=arg_name,
kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=default_arg_value,
annotation=python_type,
)
)
return params, methods_with_name, method_name_modified
def _camel_case_2_snake_case(name):
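    # Illustrative example (comment added for clarity): "getVersionInfo" -> "get_version_info"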
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower()
_CLASS_NAME_MAPPING = {
"boolean": "boolean",
"byte[]": "uint8array",
"double": "float",
"double[]": "float64_array",
"float": "float",
"int": "int",
"int[]": "uint32_array",
"java.lang.String": "string",
"long": "int",
"short": "int",
"void": "void",
}
_JAVA_ARRAY_TYPE_NUMPY_DTYPE = {
"byte[]": np.uint8,
"short[]": np.uint16,
"double[]": np.float64,
"int[]": np.int32,
}
_JAVA_TYPE_NAME_TO_PYTHON_TYPE = {
"boolean": bool,
"double": float,
"float": float,
"byte[]": np.ndarray,
"short[]": np.ndarray,
"double[]": np.ndarray,
"int[]": np.ndarray,
"int": int,
"java.lang.String": str,
"long": int,
"short": int,
"char": int,
"byte": int,
"void": None,
"java.lang.Object": object,
}
# type conversions that allow for autocasting
_JAVA_TYPE_NAME_TO_CASTABLE_PYTHON_TYPE = {
"boolean": {bool},
"byte[]": {np.ndarray},
"double": {float, int},
"double[]": {np.ndarray},
"float": {float},
"int": {int},
"int[]": {np.ndarray},
"java.lang.String": {str},
"long": {int},
"short": {int},
"char": {int},
"byte": {int},
"void": {None},
"java.lang.Object": {object},
}
_JAVA_NON_PRIMITIVES = {"byte[]", "double[]", "int[]", "java.lang.String", "java.lang.Object"}
if __name__ == "__main__":
# Test basic bridge operations
import traceback
b = Bridge()
try:
s = b.get_studio()
except:
traceback.print_exc()
try:
c = b.get_core()
except:
traceback.print_exc()
a = 1
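    # Hedged usage sketch (not part of the original test): once a core shadow
    # object exists, Java methods are callable through the bridge with their
    # camelCase names converted to snake_case (convert_camel_case defaults to
    # True), e.g. CMMCore.getVersionInfo() becomes c.get_version_info().
    # Wrapped in try/except because it only succeeds when Micro-Manager is
    # running with the ZMQ server enabled.
    try:
        print(c.get_version_info())
    except Exception:
        traceback.print_exc()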
|
py
|
1a55a8f14f940df08a1c1818762dd1a3d8b0fda6
|
# jwt.py
'''JWT generation logic'''
from flask import abort
from flask_jwt_extended import (create_access_token,
create_refresh_token)
from .models import User
import datetime
from .hashing import verify_password
def user_gen_jwt(username, password='', refresh=False):
'''
    Main JWT dispatching function. Checks the incoming username and whether the
    user exists in the database.
    Args:
        username (str): Username string `email` that will be queried in the DB
        [password (str)]: Plaintext (unhashed) password, verified against the
            stored hash before a JWT is issued
        refresh (bool): Whether to return a refresh token or not
    Raises:
        TypeError,
        Abort 401: Failed username query
    Returns:
        access_token (str): JWT session key with 3600min life-span
        [refresh_token (str)]: JWT refresh session key, returned only if
            refresh==True
'''
aux = User.query.filter_by(username=username).first()
if aux and verify_password(aux.password, password):
if refresh:
refresh_token = create_refresh_token(identity=username)
return refresh_token
access_token = create_access_token(identity=username)
return access_token
elif aux and password == '':
access_token = create_access_token(identity=username)
return access_token
else:
abort(401)
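# --- Hypothetical usage sketch (added for illustration; names such as
# `auth_blueprint` below are assumptions, not part of this module) ---
# A Flask view might dispatch tokens like this:
#
#     @auth_blueprint.route('/login', methods=['POST'])
#     def login():
#         data = request.get_json()
#         return {
#             'access_token': user_gen_jwt(data['username'], data['password']),
#             'refresh_token': user_gen_jwt(data['username'], data['password'],
#                                           refresh=True),
#         }, 200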
|
py
|
1a55aa535153a04ee310091b75d2361f01b205d7
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FCoEGSNNNN(Base):
__slots__ = ()
_SDM_NAME = 'fCoEGSNNNN'
_SDM_ATT_MAP = {
'FcoeHeaderVersion': 'fCoEGSNNNN.header.fcoeHeader.version-1',
'FcoeHeaderReserved': 'fCoEGSNNNN.header.fcoeHeader.reserved-2',
'FcoeHeaderESOF': 'fCoEGSNNNN.header.fcoeHeader.eSOF-3',
'DeviceDataFramesDeviceDataInfo': 'fCoEGSNNNN.header.fcHeader.rCTL.deviceDataFrames.deviceDataInfo-4',
'RCTLReserved': 'fCoEGSNNNN.header.fcHeader.rCTL.reserved-5',
'ExtendedLinkServicesInfo': 'fCoEGSNNNN.header.fcHeader.rCTL.extendedLinkServices.info-6',
'Fc4LinkDataInfo': 'fCoEGSNNNN.header.fcHeader.rCTL.fc4LinkData.info-7',
'VideoDataInfo': 'fCoEGSNNNN.header.fcHeader.rCTL.videoData.info-8',
'ExtendedHeaderInfo': 'fCoEGSNNNN.header.fcHeader.rCTL.extendedHeader.info-9',
'BasicLinkServicesInfo': 'fCoEGSNNNN.header.fcHeader.rCTL.basicLinkServices.info-10',
'LinkControlFramesInfo': 'fCoEGSNNNN.header.fcHeader.rCTL.linkControlFrames.info-11',
'ExtendedRoutingInfo': 'fCoEGSNNNN.header.fcHeader.rCTL.extendedRouting.info-12',
'FcHeaderDstId': 'fCoEGSNNNN.header.fcHeader.dstId-13',
'FcHeaderCsCTLPriority': 'fCoEGSNNNN.header.fcHeader.csCTLPriority-14',
'FcHeaderSrcId': 'fCoEGSNNNN.header.fcHeader.srcId-15',
'FcHeaderType': 'fCoEGSNNNN.header.fcHeader.type-16',
'FCTLCustom': 'fCoEGSNNNN.header.fcHeader.fCTL.custom-17',
'BuildFCTLExchangeContext': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.exchangeContext-18',
'BuildFCTLSequenceContext': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.sequenceContext-19',
'BuildFCTLFirstSequence': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.firstSequence-20',
'BuildFCTLLastSequence': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.lastSequence-21',
'BuildFCTLEndSequence': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.endSequence-22',
'BuildFCTLEndConnection': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.endConnection-23',
'BuildFCTLCsCTLPriority': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.csCTLPriority-24',
'BuildFCTLSequenceInitiative': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.sequenceInitiative-25',
'BuildFCTLFcXIDReassigned': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.fcXIDReassigned-26',
'BuildFCTLFcInvalidateXID': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.fcInvalidateXID-27',
'BuildFCTLAckForm': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.ackForm-28',
'BuildFCTLFcDataCompression': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.fcDataCompression-29',
'BuildFCTLFcDataEncryption': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.fcDataEncryption-30',
'BuildFCTLRetransmittedSequence': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.retransmittedSequence-31',
'BuildFCTLUnidirectionalTransmit': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.unidirectionalTransmit-32',
'BuildFCTLContinueSeqCondition': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.continueSeqCondition-33',
'BuildFCTLAbortSeqCondition': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.abortSeqCondition-34',
'BuildFCTLRelativeOffsetPresent': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.relativeOffsetPresent-35',
'BuildFCTLExchangeReassembly': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.exchangeReassembly-36',
'BuildFCTLFillBytes': 'fCoEGSNNNN.header.fcHeader.fCTL.buildFCTL.fillBytes-37',
'FcHeaderSeqID': 'fCoEGSNNNN.header.fcHeader.seqID-38',
'FcHeaderDfCTL': 'fCoEGSNNNN.header.fcHeader.dfCTL-39',
'FcHeaderSeqCNT': 'fCoEGSNNNN.header.fcHeader.seqCNT-40',
'FcHeaderOxID': 'fCoEGSNNNN.header.fcHeader.oxID-41',
'FcHeaderRxID': 'fCoEGSNNNN.header.fcHeader.rxID-42',
'FcHeaderParameter': 'fCoEGSNNNN.header.fcHeader.parameter-43',
'FcCTRevision': 'fCoEGSNNNN.header.fcCT.revision-44',
'FcCTInId': 'fCoEGSNNNN.header.fcCT.inId-45',
'FcCTGsType': 'fCoEGSNNNN.header.fcCT.gsType-46',
'FcCTGsSubtype': 'fCoEGSNNNN.header.fcCT.gsSubtype-47',
'FcCTOptions': 'fCoEGSNNNN.header.fcCT.options-48',
'FcCTReserved': 'fCoEGSNNNN.header.fcCT.reserved-49',
'DNSOpcode': 'fCoEGSNNNN.header.dNS.opcode-50',
'DNSMaxsize': 'fCoEGSNNNN.header.dNS.maxsize-51',
'DNSReserved': 'fCoEGSNNNN.header.dNS.reserved-52',
'DNSNodeName': 'fCoEGSNNNN.header.dNS.nodeName-53',
'FcCRCAutoCRC': 'fCoEGSNNNN.header.fcCRC.autoCRC-54',
'FcCRCGenerateBadCRC': 'fCoEGSNNNN.header.fcCRC.generateBadCRC-55',
'FcTrailerEEOF': 'fCoEGSNNNN.header.fcTrailer.eEOF-56',
'FcTrailerReserved': 'fCoEGSNNNN.header.fcTrailer.reserved-57',
}
def __init__(self, parent, list_op=False):
super(FCoEGSNNNN, self).__init__(parent, list_op)
@property
def FcoeHeaderVersion(self):
"""
Display Name: Version
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderVersion']))
@property
def FcoeHeaderReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderReserved']))
@property
def FcoeHeaderESOF(self):
"""
Display Name: E-SOF
Default Value: 54
Value Format: decimal
Available enum values: SOFf - Fabric, 40, SOFi4 - Initiate Class 4, 41, SOFi2 - Initiate Class 2, 45, SOFi3 - Initiate Class 3, 46, SOFn4 - Normal Class 4, 49, SOFn2 - Normal Class 2, 53, SOFn3 - Normal Class 3, 54, SOFc4 - Connect Class 4, 57, SOFn1 - Normal Class 1 or 6, 250
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcoeHeaderESOF']))
@property
def DeviceDataFramesDeviceDataInfo(self):
"""
Display Name: Information
Default Value: 0
Value Format: decimal
Available enum values: Uncategorized Information, 0, Solicited Data, 1, Unsolicited Control, 2, Solicited Control, 3, Unsolicited Data, 4, Data Descriptor, 5, Unsolicited Command, 6, Command Status, 7
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DeviceDataFramesDeviceDataInfo']))
@property
def RCTLReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RCTLReserved']))
@property
def ExtendedLinkServicesInfo(self):
"""
Display Name: Information
Default Value: 33
Value Format: decimal
Available enum values: Solicited Data, 32, Request, 33, Reply, 34
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedLinkServicesInfo']))
@property
def Fc4LinkDataInfo(self):
"""
Display Name: Information
Default Value: 48
Value Format: decimal
Available enum values: Uncategorized Information, 48, Solicited Data, 49, Unsolicited Control, 50, Solicited Control, 51, Unsolicited Data, 52, Data Descriptor, 53, Unsolicited Command, 54, Command Status, 55
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Fc4LinkDataInfo']))
@property
def VideoDataInfo(self):
"""
Display Name: Information
Default Value: 68
Value Format: decimal
Available enum values: Unsolicited Data, 68
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['VideoDataInfo']))
@property
def ExtendedHeaderInfo(self):
"""
Display Name: Information
Default Value: 80
Value Format: decimal
Available enum values: Virtual Fabric Tagging Header, 80, Inter Fabric Routing Header, 81, Encapsulation Header, 82
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedHeaderInfo']))
@property
def BasicLinkServicesInfo(self):
"""
Display Name: Information
Default Value: 128
Value Format: decimal
Available enum values: No Operation, 128, Abort Sequence, 129, Remove Connection, 130, Basic Accept, 132, Basic Reject, 133, Dedicated Connection Preempted, 134
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BasicLinkServicesInfo']))
@property
def LinkControlFramesInfo(self):
"""
Display Name: Information
Default Value: 192
Value Format: decimal
Available enum values: Acknowledge_1, 128, Acknowledge_0, 129, Nx Port Reject, 130, Fabric Reject, 131, Nx Port Busy, 132, Fabric Busy to Data Frame, 133, Fabric Busy to Link Control Frame, 134, Link Credit Reset, 135, Notify, 136, End, 137
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LinkControlFramesInfo']))
@property
def ExtendedRoutingInfo(self):
"""
Display Name: Information
Default Value: 240
Value Format: decimal
Available enum values: Vendor Unique, 240
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ExtendedRoutingInfo']))
@property
def FcHeaderDstId(self):
"""
Display Name: Destination ID
Default Value: 0
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDstId']))
@property
def FcHeaderCsCTLPriority(self):
"""
Display Name: CS_CTL/Priority
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderCsCTLPriority']))
@property
def FcHeaderSrcId(self):
"""
Display Name: Source ID
Default Value: 0
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSrcId']))
@property
def FcHeaderType(self):
"""
Display Name: Type
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderType']))
@property
def FCTLCustom(self):
"""
Display Name: Custom
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCTLCustom']))
@property
def BuildFCTLExchangeContext(self):
"""
Display Name: Exchange Context
Default Value: 0
Value Format: decimal
Available enum values: Originator, 0, Receipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeContext']))
@property
def BuildFCTLSequenceContext(self):
"""
Display Name: Sequence Context
Default Value: 0
Value Format: decimal
Available enum values: Initiator, 0, Receipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceContext']))
@property
def BuildFCTLFirstSequence(self):
"""
Display Name: First Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, First, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFirstSequence']))
@property
def BuildFCTLLastSequence(self):
"""
Display Name: Last Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLLastSequence']))
@property
def BuildFCTLEndSequence(self):
"""
Display Name: End Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndSequence']))
@property
def BuildFCTLEndConnection(self):
"""
Display Name: End Connection
Default Value: 0
Value Format: decimal
Available enum values: Alive, 0, Pending, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLEndConnection']))
@property
def BuildFCTLCsCTLPriority(self):
"""
Display Name: CS_CTL/Priority
Default Value: 0
Value Format: decimal
Available enum values: CS_CTL, 0, Priority, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLCsCTLPriority']))
@property
def BuildFCTLSequenceInitiative(self):
"""
Display Name: Sequence Initiative
Default Value: 0
Value Format: decimal
Available enum values: Hold, 0, Transfer, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLSequenceInitiative']))
@property
def BuildFCTLFcXIDReassigned(self):
"""
Display Name: FC XID Reassigned
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcXIDReassigned']))
@property
def BuildFCTLFcInvalidateXID(self):
"""
Display Name: FC Invalidate XID
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcInvalidateXID']))
@property
def BuildFCTLAckForm(self):
"""
Display Name: ACK_Form
Default Value: 0
Value Format: decimal
Available enum values: No assistance provided, 0, ACK_1 Required, 1, reserved, 2, Ack_0 Required, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAckForm']))
@property
def BuildFCTLFcDataCompression(self):
"""
Display Name: FC Data Compression
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataCompression']))
@property
def BuildFCTLFcDataEncryption(self):
"""
Display Name: FC Data Encryption
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFcDataEncryption']))
@property
def BuildFCTLRetransmittedSequence(self):
"""
Display Name: Retransmitted Sequence
Default Value: 0
Value Format: decimal
Available enum values: Original, 0, Retransmission, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRetransmittedSequence']))
@property
def BuildFCTLUnidirectionalTransmit(self):
"""
Display Name: Unidirectional Transmit
Default Value: 0
Value Format: decimal
Available enum values: Bi-directional, 0, Unidirectional, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLUnidirectionalTransmit']))
@property
def BuildFCTLContinueSeqCondition(self):
"""
Display Name: Continue Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: No information, 0, Sequence to follow-immediately, 1, Squence to follow-soon, 2, Sequence to follow-delayed, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLContinueSeqCondition']))
@property
def BuildFCTLAbortSeqCondition(self):
"""
Display Name: Abort Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: 0x00, 0, 0x01, 1, 0x10, 2, 0x11, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLAbortSeqCondition']))
@property
def BuildFCTLRelativeOffsetPresent(self):
"""
Display Name: Relative Offset Present
Default Value: 0
Value Format: decimal
Available enum values: Parameter field defined, 0, Relative offset, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLRelativeOffsetPresent']))
@property
def BuildFCTLExchangeReassembly(self):
"""
Display Name: Exchange Reassembly
Default Value: 0
Value Format: decimal
Available enum values: off, 0, on, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLExchangeReassembly']))
@property
def BuildFCTLFillBytes(self):
"""
Display Name: Fill Bytes
Default Value: 0
Value Format: decimal
Available enum values: 0 bytes of fill, 0, 1 bytes of fill, 1, 2 bytes of fill, 2, 3 bytes of fill, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BuildFCTLFillBytes']))
@property
def FcHeaderSeqID(self):
"""
Display Name: SEQ_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqID']))
@property
def FcHeaderDfCTL(self):
"""
Display Name: DF_CTL
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderDfCTL']))
@property
def FcHeaderSeqCNT(self):
"""
Display Name: SEQ_CNT
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderSeqCNT']))
@property
def FcHeaderOxID(self):
"""
Display Name: OX_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderOxID']))
@property
def FcHeaderRxID(self):
"""
Display Name: RX_ID
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderRxID']))
@property
def FcHeaderParameter(self):
"""
Display Name: Parameter
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcHeaderParameter']))
@property
def FcCTRevision(self):
"""
Display Name: Revision
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTRevision']))
@property
def FcCTInId(self):
"""
Display Name: IN_ID
Default Value: 0x000000
Value Format: fCID
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTInId']))
@property
def FcCTGsType(self):
"""
Display Name: GS_Type
Default Value: 252
Value Format: decimal
Available enum values: Event Service, 244, Key Distribution Service, 247, Alias Service, 248, Management Service, 250, Time Service, 251, Directory Service, 252
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsType']))
@property
def FcCTGsSubtype(self):
"""
Display Name: GS_Subtype
Default Value: 0x02
Value Format: hex
Available enum values: X.500 Server (Obsolete), 1, Name Server, 2, IP Address Server (Obsolete), 3, FC-4 specific Servers, 128
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTGsSubtype']))
@property
def FcCTOptions(self):
"""
Display Name: Options
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTOptions']))
@property
def FcCTReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCTReserved']))
@property
def DNSOpcode(self):
"""
Display Name: Command/Response Code
Default Value: 313
Value Format: decimal
Available enum values: GA_NXT, 256, GID_A, 257, GPN_ID, 274, GNN_ID, 275, GCS_ID, 276, GFT_ID, 279, GSPN_ID, 280, GPT_ID, 282, GIPP_ID, 283, GFPN_ID, 284, GHA_ID, 285, GFD_ID, 286, GFF_ID, 287, GID_PN, 289, GIPP_PN, 299, GID_NN, 305, GPN_NN, 306, GIP_NN, 309, GIPA_NN, 310, GSNN_NN, 313, GNN_IP, 339, GIPA_IP, 342, GID_FT, 369, GPN_FT, 370, GNN_FT, 371, GNN_FF, 384, GPN_FF, 385, GPN_SDFCP, 386, GID_PT, 417, GID_IPP, 433, GPN_IPP, 434, GID_FPN, 449, GPPN_ID, 465, GID_FF, 497, GID_DP, 498, RPN_ID, 530, RNN_ID, 531, RCS_ID, 532, RFT_ID, 535, RSPN_ID, 536, RPT_ID, 538, RIPP_ID, 539, RHA_ID, 541, RFD_ID, 542, RFF_ID, 543, RIP_NN, 565, RIPA_NN, 566, RSNN_NN, 569, DA_ID, 768, SSB, 32761, SSE, 32762
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DNSOpcode']))
@property
def DNSMaxsize(self):
"""
Display Name: Maximum/Residual Size
Default Value: 0x0000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DNSMaxsize']))
@property
def DNSReserved(self):
"""
Display Name: Reserved
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DNSReserved']))
@property
def DNSNodeName(self):
"""
Display Name: Node Name
Default Value: 0x2000000000000001
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['DNSNodeName']))
@property
def FcCRCAutoCRC(self):
"""
Display Name: Auto
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCAutoCRC']))
@property
def FcCRCGenerateBadCRC(self):
"""
Display Name: Bad CRC
Default Value: 0x01
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcCRCGenerateBadCRC']))
@property
def FcTrailerEEOF(self):
"""
Display Name: E-EOF
Default Value: 65
Value Format: decimal
Available enum values: EOFn - Normal, 65, EOFt - Terminate, 66, EOFrt - Remove Terminate, 68, EOFni - Normal Invalid, 73, EOFrti - Remove Terminate Invalid, 79, EOFa - Abort, 80
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerEEOF']))
@property
def FcTrailerReserved(self):
"""
Display Name: Reserved
Default Value: 0x000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FcTrailerReserved']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
py
|
1a55aab051b3f8e83d75cd37e407bb5565d8dccf
|
# pylint: disable=no-self-use,invalid-name
import pytest
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.data.iterators import BucketIterator
from allennlp.tests.data.iterators.basic_iterator_test import IteratorTest
class TestBucketIterator(IteratorTest):
# pylint: disable=protected-access
def test_create_batches_groups_correctly(self):
iterator = BucketIterator(batch_size=2, padding_noise=0, sorting_keys=[('text', 'num_tokens')])
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(self.instances, shuffle=False))
grouped_instances = [batch.instances for batch in batches]
assert grouped_instances == [[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]]]
def test_create_batches_groups_correctly_with_max_instances(self):
# If we knew all the instances, the correct order is 4 -> 2 -> 0 -> 1 -> 3.
# Here max_instances_in_memory is 3, so we load instances [0, 1, 2]
# and then bucket them by size into batches of size 2 to get [2, 0] -> [1].
# Then we load the remaining instances and bucket them by size to get [4, 3].
iterator = BucketIterator(batch_size=2,
padding_noise=0,
sorting_keys=[('text', 'num_tokens')],
max_instances_in_memory=3)
iterator.index_with(self.vocab)
for test_instances in (self.instances, self.lazy_instances):
batches = list(iterator._create_batches(test_instances, shuffle=False))
grouped_instances = [batch.instances for batch in batches]
assert grouped_instances == [[self.instances[2], self.instances[0]],
[self.instances[1]],
[self.instances[4], self.instances[3]]]
def test_biggest_batch_first_works(self):
iterator = BucketIterator(batch_size=2,
padding_noise=0,
sorting_keys=[('text', 'num_tokens')],
biggest_batch_first=True)
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(self.instances, shuffle=False))
grouped_instances = [batch.instances for batch in batches]
assert grouped_instances == [[self.instances[3]],
[self.instances[0], self.instances[1]],
[self.instances[4], self.instances[2]]]
def test_from_params(self):
# pylint: disable=protected-access
params = Params({})
with pytest.raises(ConfigurationError):
iterator = BucketIterator.from_params(params)
sorting_keys = [("s1", "nt"), ("s2", "nt2")]
params['sorting_keys'] = sorting_keys
iterator = BucketIterator.from_params(params)
assert iterator._sorting_keys == sorting_keys
assert iterator._padding_noise == 0.1
assert not iterator._biggest_batch_first
assert iterator._batch_size == 32
assert not iterator._skip_smaller_batches
params = Params({
"sorting_keys": sorting_keys,
"padding_noise": 0.5,
"biggest_batch_first": True,
"batch_size": 100,
"skip_smaller_batches": True
})
iterator = BucketIterator.from_params(params)
assert iterator._sorting_keys == sorting_keys
assert iterator._padding_noise == 0.5
assert iterator._biggest_batch_first
assert iterator._batch_size == 100
assert iterator._skip_smaller_batches
def test_bucket_iterator_maximum_samples_per_batch(self):
iterator = BucketIterator(
batch_size=3,
padding_noise=0,
sorting_keys=[('text', 'num_tokens')],
maximum_samples_per_batch=['num_tokens', 9]
)
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(self.instances, shuffle=False))
stats = self.get_batches_stats(batches)
# ensure all instances are in a batch
assert stats['total_instances'] == len(self.instances)
# ensure correct batch sizes
assert stats['batch_lengths'] == [2, 2, 1]
# ensure correct sample sizes (<= 9)
assert stats['sample_sizes'] == [6, 8, 9]
def test_maximum_samples_per_batch_packs_tightly(self):
token_counts = [10, 4, 3]
test_instances = self.create_instances_from_token_counts(token_counts)
iterator = BucketIterator(
batch_size=3,
padding_noise=0,
sorting_keys=[('text', 'num_tokens')],
maximum_samples_per_batch=['num_tokens', 11]
)
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(test_instances, shuffle=False))
stats = self.get_batches_stats(batches)
# ensure all instances are in a batch
assert stats['total_instances'] == len(test_instances)
# ensure correct batch sizes
assert stats['batch_lengths'] == [2, 1]
# ensure correct sample sizes (<= 11)
assert stats['sample_sizes'] == [8, 10]
def test_skip_smaller_batches_works(self):
iterator = BucketIterator(batch_size=2, padding_noise=0, sorting_keys=[('text', 'num_tokens')],
skip_smaller_batches=True)
iterator.index_with(self.vocab)
batches = list(iterator._create_batches(self.instances, shuffle=False))
stats = self.get_batches_stats(batches)
# all batches have length batch_size
assert all(batch_len == 2 for batch_len in stats['batch_lengths'])
# we should have lost one instance by skipping the last batch
assert stats['total_instances'] == len(self.instances) - 1
|
py
|
1a55ab291359659641e7de041c2c893c7fc5a6ae
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import satchmo_utils.fields
class Migration(migrations.Migration):
dependencies = [
('product', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomProduct',
fields=[
('product', models.OneToOneField(primary_key=True, serialize=False, to='product.Product', verbose_name='Product')),
('downpayment', models.IntegerField(default=20, verbose_name='Percent Downpayment')),
('deferred_shipping', models.BooleanField(default=False, help_text='Do not charge shipping at checkout for this item.', verbose_name='Deferred Shipping')),
('option_group', models.ManyToManyField(to='product.OptionGroup', verbose_name='Option Group', blank=True)),
],
options={
'verbose_name': 'Custom Product',
'verbose_name_plural': 'Custom Products',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CustomTextField',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=40, verbose_name='Custom field name')),
('slug', models.SlugField(help_text='Auto-generated from name if blank', verbose_name='Slug', blank=True)),
('sort_order', models.IntegerField(default=0, help_text='The display order for this group.', verbose_name='Sort Order')),
('price_change', satchmo_utils.fields.CurrencyField(null=True, verbose_name='Price Change', max_digits=14, decimal_places=6, blank=True)),
('products', models.ForeignKey(related_name='custom_text_fields', verbose_name='Custom Fields', to='custom.CustomProduct')),
],
options={
'ordering': ('sort_order',),
},
bases=(models.Model,),
),
migrations.CreateModel(
name='CustomTextFieldTranslation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('languagecode', models.CharField(max_length=10, verbose_name='language', choices=[(b'en', b'English')])),
('name', models.CharField(max_length=255, verbose_name='Translated Custom Text Field Name')),
('version', models.IntegerField(default=1, verbose_name='version')),
('active', models.BooleanField(default=True, verbose_name='active')),
('customtextfield', models.ForeignKey(related_name='translations', to='custom.CustomTextField')),
],
options={
'ordering': ('customtextfield', 'name', 'languagecode'),
'verbose_name': 'CustomTextField Translation',
'verbose_name_plural': 'CustomTextField Translations',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='customtextfieldtranslation',
unique_together=set([('customtextfield', 'languagecode', 'version')]),
),
migrations.AlterUniqueTogether(
name='customtextfield',
unique_together=set([('slug', 'products')]),
),
]
|
py
|
1a55ab42ec4b153f851c2eda9978e41ae0db72b1
|
#!/usr/bin/env python
#Copyright (C) 2009-2011 by Benedict Paten ([email protected])
#
#Released under the MIT license, see LICENSE.txt
import unittest
import sys
import os
from sonLib.bioio import getLogLevelString
from sonLib.bioio import parseSuiteTestOptions
from sonLib.bioio import system
class TestCase(unittest.TestCase):
def testCuTest(self):
system("matchingAndOrderingTests %s" % getLogLevelString())
def main():
parseSuiteTestOptions()
sys.argv = sys.argv[:1]
unittest.main()
if __name__ == '__main__':
main()
|
py
|
1a55ab7ff6d6480b3d3d0a155f34d30efc08b1c4
|
# Copyright 2018 The Fuego Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import intersection
import pytest
import numpy as np
def test_slope_intercept_90_degrees():
coord = intersection.slope_and_intercept(45,135,5,-5,5,5)
assert np.all(coord == [0,0])
def test_slope_intercept_45_degrees():
coord = intersection.slope_and_intercept(45,90,5,2,5,5)
assert np.all(coord == [2,2])
def test_slope_intercept_90_degrees_opp():
coord = intersection.slope_and_intercept(135,45,5,-5,-5,-5)
assert np.all(coord == [0,0])
def test_slope_intercept_45_degrees_opp():
coord = intersection.slope_and_intercept(45,90,-5,-2,-5,-5)
assert np.all(coord == [-2,-2])
|
py
|
1a55abb42c7b47812d11dc55bb91f57712e48365
|
"""Modules for multi-modal datasets
PCME
Copyright (c) 2021-present NAVER Corp.
MIT license
"""
from datasets._dataloader import prepare_coco_dataloaders, prepare_cub_dataloaders
from datasets.vocab import Vocabulary
__all__ = [
'Vocabulary',
'prepare_coco_dataloaders',
'prepare_cub_dataloaders',
]
|
py
|
1a55abb86b8a180f6a3927b41c74fce74e8ef80f
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyTetoolkit(PythonPackage):
"""TEToolkit is a software package that utilizes both unambiguously
(uniquely) and ambiguously (multi-) mapped reads to perform
differential enrichment analyses from high throughput sequencing
experiments."""
homepage = "http://hammelllab.labsites.cshl.edu/software"
pypi = "TEToolkit/TEToolkit-1.5.1.tar.gz"
version('2.0.3', sha256='1d0f5928b30c6cd9dbef8e092ae0c11e9e707faf92a19af8eed3e360da7d4e46')
version('1.5.1', sha256='22c13ca45bccc89e9d9bf48d59ae6db1fa4c634def64fc56ba9bffd23aa689ac')
depends_on('py-setuptools')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-pysam', type=('build', 'run'))
depends_on('r-deseq', when='@:1.5.1', type=('build', 'run'))
depends_on('r-deseq2', when='@2.0.0:', type=('build', 'run'))
|
py
|
1a55abe6bbde9010182f3e8862220d947373edff
|
#import copy
#from PhysicsTools.PatAlgos.tools.helpers import *
#
# Tracking
#
from RecoEgamma.EgammaElectronProducers.ecalDrivenElectronSeeds_cfi import *
uncleanedOnlyElectronSeeds = ecalDrivenElectronSeeds.clone(
barrelSuperClusters = cms.InputTag("uncleanedOnlyCorrectedHybridSuperClusters"),
endcapSuperClusters = cms.InputTag("uncleanedOnlyCorrectedMulti5x5SuperClustersWithPreshower")
)
from TrackingTools.GsfTracking.CkfElectronCandidateMaker_cff import *
uncleanedOnlyElectronCkfTrackCandidates = electronCkfTrackCandidates.clone(
src = cms.InputTag("uncleanedOnlyElectronSeeds")
)
from TrackingTools.GsfTracking.GsfElectronGsfFit_cff import *
uncleanedOnlyElectronGsfTracks = electronGsfTracks.clone(
src = 'uncleanedOnlyElectronCkfTrackCandidates'
)
uncleanedOnlyTrackingTask = cms.Task(uncleanedOnlyElectronSeeds,uncleanedOnlyElectronCkfTrackCandidates,uncleanedOnlyElectronGsfTracks)
uncleanedOnlyTracking = cms.Sequence(uncleanedOnlyTrackingTask)
#
# Conversions
#
from RecoEgamma.EgammaPhotonProducers.conversionTrackCandidates_cfi import *
uncleanedOnlyConversionTrackCandidates = conversionTrackCandidates.clone(
scHybridBarrelProducer = cms.InputTag("uncleanedOnlyCorrectedHybridSuperClusters"),
bcBarrelCollection = cms.InputTag("hybridSuperClusters","uncleanOnlyHybridSuperClusters"),
scIslandEndcapProducer = cms.InputTag("uncleanedOnlyCorrectedMulti5x5SuperClustersWithPreshower"),
bcEndcapCollection = cms.InputTag("multi5x5SuperClusters","uncleanOnlyMulti5x5EndcapBasicClusters")
)
from RecoEgamma.EgammaPhotonProducers.ckfOutInTracksFromConversions_cfi import *
uncleanedOnlyCkfOutInTracksFromConversions = ckfOutInTracksFromConversions.clone(
src = cms.InputTag("uncleanedOnlyConversionTrackCandidates","outInTracksFromConversions"),
producer = cms.string('uncleanedOnlyConversionTrackCandidates'),
ComponentName = cms.string('uncleanedOnlyCkfOutInTracksFromConversions')
)
from RecoEgamma.EgammaPhotonProducers.ckfInOutTracksFromConversions_cfi import *
uncleanedOnlyCkfInOutTracksFromConversions = ckfInOutTracksFromConversions.clone(
src = cms.InputTag("uncleanedOnlyConversionTrackCandidates","inOutTracksFromConversions"),
producer = cms.string('uncleanedOnlyConversionTrackCandidates'),
ComponentName = cms.string('uncleanedOnlyCkfInOutTracksFromConversions')
)
uncleanedOnlyCkfTracksFromConversionsTask = cms.Task(uncleanedOnlyConversionTrackCandidates,uncleanedOnlyCkfOutInTracksFromConversions,uncleanedOnlyCkfInOutTracksFromConversions)
uncleanedOnlyCkfTracksFromConversions = cms.Sequence(uncleanedOnlyCkfTracksFromConversionsTask)
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
uncleanedOnlyGeneralConversionTrackProducer = generalConversionTrackProducer.clone()
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
uncleanedOnlyInOutConversionTrackProducer = inOutConversionTrackProducer.clone(
TrackProducer = cms.string('uncleanedOnlyCkfInOutTracksFromConversions')
)
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
uncleanedOnlyOutInConversionTrackProducer = outInConversionTrackProducer.clone(
TrackProducer = cms.string('uncleanedOnlyCkfOutInTracksFromConversions')
)
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
uncleanedOnlyGsfConversionTrackProducer = gsfConversionTrackProducer.clone(
TrackProducer = cms.string('uncleanedOnlyElectronGsfTracks')
)
uncleanedOnlyConversionTrackProducersTask = cms.Task(uncleanedOnlyGeneralConversionTrackProducer,uncleanedOnlyInOutConversionTrackProducer,uncleanedOnlyOutInConversionTrackProducer,uncleanedOnlyGsfConversionTrackProducer)
uncleanedOnlyConversionTrackProducers = cms.Sequence(uncleanedOnlyConversionTrackProducersTask)
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
uncleanedOnlyInOutOutInConversionTrackMerger = inOutOutInConversionTrackMerger.clone(
TrackProducer2 = cms.InputTag('uncleanedOnlyOutInConversionTrackProducer'),
TrackProducer1 = cms.InputTag('uncleanedOnlyInOutConversionTrackProducer')
)
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
uncleanedOnlyGeneralInOutOutInConversionTrackMerger = generalInOutOutInConversionTrackMerger.clone(
TrackProducer2 = cms.InputTag('uncleanedOnlyGeneralConversionTrackProducer'),
TrackProducer1 = cms.InputTag('uncleanedOnlyInOutOutInConversionTrackMerger')
)
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
uncleanedOnlyGsfGeneralInOutOutInConversionTrackMerger = gsfGeneralInOutOutInConversionTrackMerger.clone(
TrackProducer2 = cms.InputTag('uncleanedOnlyGsfConversionTrackProducer'),
TrackProducer1 = cms.InputTag('uncleanedOnlyGeneralInOutOutInConversionTrackMerger')
)
uncleanedOnlyConversionTrackMergersTask = cms.Task(uncleanedOnlyInOutOutInConversionTrackMerger,uncleanedOnlyGeneralInOutOutInConversionTrackMerger,uncleanedOnlyGsfGeneralInOutOutInConversionTrackMerger)
uncleanedOnlyConversionTrackMergers = cms.Sequence(uncleanedOnlyConversionTrackMergersTask)
from RecoEgamma.EgammaPhotonProducers.allConversions_cfi import *
uncleanedOnlyAllConversions = allConversions.clone(
scBarrelProducer = cms.InputTag("uncleanedOnlyCorrectedHybridSuperClusters"),
bcBarrelCollection = cms.InputTag("hybridSuperClusters","uncleanOnlyHybridSuperClusters"),
scEndcapProducer = cms.InputTag("uncleanedOnlyCorrectedMulti5x5SuperClustersWithPreshower"),
bcEndcapCollection = cms.InputTag("multi5x5SuperClusters","uncleanOnlyMulti5x5EndcapBasicClusters"),
src = cms.InputTag("uncleanedOnlyGsfGeneralInOutOutInConversionTrackMerger")
)
uncleanedOnlyConversionsTask = cms.Task(uncleanedOnlyCkfTracksFromConversionsTask,uncleanedOnlyConversionTrackProducersTask,uncleanedOnlyConversionTrackMergersTask,uncleanedOnlyAllConversions)
uncleanedOnlyConversions = cms.Sequence(uncleanedOnlyConversionsTask)
#
# Particle Flow Tracking
#
from RecoParticleFlow.PFTracking.pfTrack_cfi import *
uncleanedOnlyPfTrack = pfTrack.clone(
GsfTrackModuleLabel = cms.InputTag("uncleanedOnlyElectronGsfTracks")
)
from RecoParticleFlow.PFTracking.pfConversions_cfi import *
uncleanedOnlyPfConversions = pfConversions.clone(
conversionCollection = cms.InputTag("allConversions")
)
from RecoParticleFlow.PFTracking.pfTrackElec_cfi import *
uncleanedOnlyPfTrackElec = pfTrackElec.clone(
PFConversions = cms.InputTag("uncleanedOnlyPfConversions"),
GsfTrackModuleLabel = cms.InputTag("uncleanedOnlyElectronGsfTracks"),
PFRecTrackLabel = cms.InputTag("uncleanedOnlyPfTrack")
)
uncleanedOnlyPfTrackingTask = cms.Task(uncleanedOnlyPfTrack,uncleanedOnlyPfConversions,uncleanedOnlyPfTrackElec)
uncleanedOnlyPfTracking = cms.Sequence(uncleanedOnlyPfTrackingTask)
#
# Electrons
#
from RecoEgamma.EgammaElectronProducers.gsfElectronCores_cfi import *
uncleanedOnlyGsfElectronCores = ecalDrivenGsfElectronCores.clone(
gsfTracks = cms.InputTag("uncleanedOnlyElectronGsfTracks"),
gsfPfRecTracks = cms.InputTag("uncleanedOnlyPfTrackElec")
)
from RecoEgamma.EgammaElectronProducers.gsfElectrons_cfi import *
uncleanedOnlyGsfElectrons = ecalDrivenGsfElectrons.clone(
gsfPfRecTracksTag = cms.InputTag("uncleanedOnlyPfTrackElec"),
gsfElectronCoresTag = cms.InputTag("uncleanedOnlyGsfElectronCores"),
seedsTag = cms.InputTag("uncleanedOnlyElectronSeeds")
)
uncleanedOnlyElectronsTask = cms.Task(uncleanedOnlyGsfElectronCores,uncleanedOnlyGsfElectrons)
uncleanedOnlyElectrons = cms.Sequence(uncleanedOnlyElectronsTask)
#
# Whole Sequence
#
uncleanedOnlyElectronTask = cms.Task(uncleanedOnlyTrackingTask,uncleanedOnlyConversionsTask,uncleanedOnlyPfTrackingTask,uncleanedOnlyElectronsTask)
uncleanedOnlyElectronSequence = cms.Sequence(uncleanedOnlyElectronTask)
|
py
|
1a55ad05951f69a8ccdfbcca932bfd1205e5030c
|
#!/usr/bin/env python
# coding: utf-8
# # Basics of convection
# The essential model of planetary solid-state circulation is Rayleigh-Benard convection, in which a fluid held between two plane layers of different temperatures is observed to spontaneously self-organise into counter-rotating cells to maximise the efficiency of transport {cite}`Getling1998-gv`. Such a model is governed by three principal dimensionless quantities:
# - The *Prandtl* number, the ratio of momentum diffusivity (or kinematic viscosity) $\nu$ to the thermal diffusivity $\kappa$:
#
# $$ Pr = \frac{\nu}{\kappa} $$
#
# - The *Reynolds* number, the ratio of inertial forces to viscous forces and hence a measure of flow turbulence (where $u$ is flow velocity and $L$ is a length scale):
#
# $$ Re =\frac{u L}{\nu} $$
#
# - And the *Rayleigh* number, the product of $Pr$ and $Re$, interpretable as the ratio of buoyancy forces to viscous forces in the fluid (with thermal expansivity $\alpha$, gravity $g$, thermal gradient $\Delta T$, and length scale $b$; a rough numerical sketch follows this list):
#
# $$ Ra = \frac{\alpha g \Delta T b^3}{\kappa \nu} $$
#
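# A rough numerical sketch of the above (my own illustration; the parameter values are order-of-magnitude assumptions for an Earth-like mantle, not quantities taken from this text):

# Assumed, illustrative mantle-like values - none of these are prescribed above.
alpha = 3e-5      # thermal expansivity [1/K]
g = 9.8           # gravity [m/s^2]
delta_T = 3000.0  # temperature contrast across the layer [K]
b = 2.9e6         # layer depth [m]
kappa = 1e-6      # thermal diffusivity [m^2/s]
nu = 1e17         # kinematic viscosity [m^2/s]

Ra = alpha * g * delta_T * b**3 / (kappa * nu)
Pr = nu / kappa
print(f"Ra ~ {Ra:.1e}, Pr ~ {Pr:.1e}")  # Ra of order 1e8, Pr effectively infinite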
# In addition to these three input variables, one unifying output variable - also dimensionless - commonly enters into the analysis, and will prove an essential razor throughout this thesis: the *Nusselt* number, a measure of the efficiency of global thermal transport relative to that expected by conduction alone. It can be given in terms of the rate of change of the dimensionless potential temperature $\theta^*$ with respect to dimensionless depth $y^*$ {cite}`Schubert2001-ea`:
#
# $$ Nu = \left| \frac{\partial \theta^*}{\partial y^*} \right| _S $$
#
# Where $|x|_S$ indicates the average value across a surface. The asterisks indicate a non-dimensionalised quantity: a common textbook convention. This is the definition we adhere to throughout, but there is some variability in how $Nu$ is defined in the literature. In non-curved domains, $Nu$ is equivalent to the dimensionless surface temperature gradient, and so it is confusingly defined as such in some contexts {cite}`Blankenbach1989-li`. There is also a practice of adding a constant $1$ to the expression, reflecting a difference of opinion over whether $Nu$ is best constructed as an arithmetic quantity (i.e. $Nu$ as the convective flux after conductive flux is subtracted) or as a geometric quantity, as we have stated it here. We prefer the latter usage, renaming the former as $Nu_{+}$.
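# To make this definition concrete, the following sketch (illustrative only; the grid and temperature field are invented here, not drawn from any model in this work) evaluates $Nu$ for a pure conduction profile, for which the geometric definition should return unity:

import numpy as np

# A hypothetical dimensionless temperature field theta*(y*, x*) on a unit-depth grid,
# initialised to the linear conduction profile theta* = 1 - y*.
ny, nx = 65, 129
y = np.linspace(0.0, 1.0, ny)
theta = (1.0 - y)[:, None] * np.ones((ny, nx))

# One-sided gradient at the top surface, averaged over that surface S.
dy = y[1] - y[0]
surface_gradient = (theta[-1, :] - theta[-2, :]) / dy
Nu = abs(surface_gradient).mean()
print(f"Nu ~ {Nu:.2f}")  # ~1.00 for conduction, by construction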
# Because any convection of interest to us in the mantle is going to be occurring in the solid state, across great distances, and under tremendous heat gradients, we are able to make certain assumptions that simplify the analysis. Two are commonly made:
# - The *Boussinesq* approximation, in which non-gravitational terms of density are ignored, with the consequence that the fluid is incompressible. This is a justifiable assumption for just about any non-gaseous fluid, particularly one subjected to such extreme lithostatic pressures.
# - The infinite *Prandtl* assumption, in which momentum diffusivity is held to be much greater than thermal diffusivity. This is reasonable for the mantle given the measured value comes to at least $10^{23}$ {cite}`Schubert2001-ea`. Because $Pr \cdot Re = Ra$, this also implies that the *Reynolds* number must be infinitesimal, and hence that inertial forces and the turbulent effects thereof are negligible.
#
# These two simplifications have many consequences. For one, they allow the four dimensionless parameters above to be collapsed to only two: the *Rayleigh* number $Ra$ and the *Nusselt* number $Nu$ {cite}`Turcotte1967-cz`. It will shortly become clear that these two quantities actually bear a power-law relation through a third property, the '*beta* exponent' $\beta$, that in a single stroke unifies the intrinsic and extrinsic characteristics of planetary thermal transport:
#
# $$ Nu \propto Ra^{\beta}$$
#
# This elegant law forms the analytical cornerstone of our work.
#
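# As a sketch of how $\beta$ might be recovered empirically (again my own illustration: the $(Ra, Nu)$ pairs below are synthetic and carry no physical meaning), note that a power law is linear in log-log space, so ordinary least squares suffices:

import numpy as np

rng = np.random.default_rng(0)
Ra_vals = np.logspace(4, 8, 20)  # synthetic Rayleigh numbers
Nu_vals = 0.2 * Ra_vals ** (1.0 / 3.0) * rng.normal(1.0, 0.02, Ra_vals.size)  # assumed beta = 1/3

slope, intercept = np.polyfit(np.log10(Ra_vals), np.log10(Nu_vals), 1)
print(f"fitted beta ~ {slope:.3f}, prefactor ~ {10 ** intercept:.3f}")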
# Though we ground ourselves in theory, our method is empirical. Thankfully, the scheme we have laid out also provides a powerful framework for numericisation: by eliminating inertial, compressive, and turbulent forces, we are able to construct mantle convection as a kind of Stokes Flow under the body forcing of gravity $g$, limited by conservation of mass and iterated by advection-diffusion, and so captured by the equations:
#
# $$ \begin{align*}
# \nabla p - \nabla \cdot \left( \eta D \right) &= \Delta \rho \mathbf{g} \\
# \nabla \cdot \mathbf{u} &= 0 \\
# \frac{\partial T}{\partial t} + \mathbf{u} \cdot \nabla T &= \kappa \nabla^2 T + H \\
# \end{align*} $$
#
# Where $\eta$ is dynamic viscosity, $D$ the strain rate tensor, $p$ dynamic pressure, $\Delta\rho$ the density anomaly, $\mathbf{g}$ the gravity vector, $\mathbf{u}$ the velocity vector, $T$ temperature, $\kappa$ thermal diffusivity, $t$ time, and $H$ a thermal source term, i.e. radiogenic heating.
# With these equations, we may implement an alternating cycle of instantaneous pressure solutions followed by finite time-stepping of the temperature field (and any other state variables we choose to implement). Of course, such a system is meaningless and insoluble unless we further stipulate a geometry (width, length, depth, and curvature) and a set of boundary conditions for the temperature and velocity fields. The boundary conditions on each outer domain surface are typically set to be either:
# - Fixed in value (a 'Dirichlet' condition). For the temperature field, this would imply that the core and/or the space-facing surface of the planet are infinite thermal buffers. For the velocity field, this can be used - for example - to define surfaces that are impervious in the normal component and either no-stick, perfect-stick, or tractional in the parallel component.
# - Fixed in gradient (a 'Neumann' condition): for the temperature field, this would imply that the surface radiates heat at a fixed power, which in the case of zero power would make that boundary effectively insulating; for the velocity field, this essentially configures the strain rate in the chosen component.
#
# With respect to temperature, either of the above conditions can be set to inject or remove heat from the model, which - in tandem with the internal heat function $H$ - provides the fluid with the thermal situation its flow is expected to resolve, given sufficient time. On Earth, we imagine mantle convection and the interconnected phenomenon of surface tectonics to represent the Earth's own natural solution, or solution-in-progress, to the circumstance of volumetric radiogenic heating and basal heating from the core, although the debate over the relative significance of these is ancient and ongoing {cite}`Thomson1862-kb,Urey1955-zs,Korenaga2003-oy,Korenaga2008-js,Gando2011-sh,Mareschal2012-ie,Huang2013-eu,Jaupart2015-un`. For the models being discussed forthwith, we have restricted ourselves to free-slip velocity boundaries and Dirichlet thermal boundaries with a constant unit temperature gradient from top to bottom, such that the heating regime is always either purely basal or mixed.
#
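# As a minimal sketch of the time-stepping and boundary-condition pattern described above (my own illustration, not the solver used in this work): an explicit finite-difference update of the temperature field alone, with fixed-value (Dirichlet) top and bottom boundaries and an internal heating term $H$. The Stokes solve and the advective term are omitted, and the side boundaries are implicitly periodic through `np.roll`:

import numpy as np

def temperature_step(T, kappa, H, dx, dt, T_top=0.0, T_bottom=1.0):
    """One explicit diffusion step with Dirichlet top/bottom boundaries."""
    lap = (np.roll(T, 1, axis=0) + np.roll(T, -1, axis=0) +
           np.roll(T, 1, axis=1) + np.roll(T, -1, axis=1) - 4.0 * T) / dx ** 2
    T_new = T + dt * (kappa * lap + H)
    T_new[0, :] = T_bottom    # fixed-temperature base (basal heating)
    T_new[-1, :] = T_top      # fixed-temperature surface
    return T_new

# Usage: a unit temperature drop across the layer and no internal heating (purely basal regime).
ny, nx = 64, 128
dx = 1.0 / (ny - 1)
T = np.linspace(1.0, 0.0, ny)[:, None] * np.ones((ny, nx))
for _ in range(100):
    T = temperature_step(T, kappa=1.0, H=0.0, dx=dx, dt=0.2 * dx ** 2)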
# Within these simplifying constraints, almost limitless variety is possible - which is why this essential formulation has become common to virtually all studies of mantle convection. However, while it is hoped that somewhere within problem-space a solution resembling Earth may be found, such a solution must elude us until our grasp of the fundamentals is absolute; and there is still much about even the simplest rheologies that we do not understand.
# ## Linear rheologies: the isoviscous case
#
# When we talk about rheology, or a material's style of flow, we are primarily talking about its viscosity function - the expression that maps the local viscosity at any point in the fluid to its context, history, or environment. While most fluids in the real world have complex 'Non-Newtonian' rheologies in which viscosity depends on the applied stress or strain rate, mantle convection studies have often relied on simpler Newtonian rheologies that are more numerically and analytically tractable. These are what we here call 'linear rheologies'.
#
# The bulk of mantle convection studies focuses on one of the following three linear rheologies:
# - **Isoviscous** or 'constant viscosity' flow.
# - **Arrhenius**-type exponentially temperature-dependent viscosity.
# - **Spatialised** viscosity - either arbitrarily zoned or simply depth-dependent.
#
# This two-part investigation will focus on the isoviscous and Arrhenius types. The former can be viewed as a special case of the latter, so it makes a fitting launching-off point for this study.
|
py
|
1a55ad21423d419e2a50eaf5a2f7f4188c51a605
|
# Generated by Django 3.1.6 on 2021-02-18 18:41
import multiselectfield.db.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("structures", "0017_war_dec_notifications"),
]
operations = [
migrations.RemoveField(
model_name="notification",
name="notification_type",
),
migrations.AddField(
model_name="notification",
name="notif_type",
field=models.CharField(
db_index=True,
default="",
help_text="type of this notification as reported by ESI",
max_length=100,
verbose_name="type",
),
),
migrations.AlterField(
model_name="notification",
name="created",
field=models.DateTimeField(
default=None,
help_text="Date when this notification was first received from ESI",
null=True,
),
),
migrations.AlterField(
model_name="notification",
name="is_read",
field=models.BooleanField(
default=None,
help_text="True when this notification has read in the eve client",
null=True,
),
),
migrations.AlterField(
model_name="notification",
name="is_sent",
field=models.BooleanField(
default=False,
help_text="True when this notification has been forwarded to Discord",
),
),
migrations.AlterField(
model_name="notification",
name="is_timer_added",
field=models.BooleanField(
default=False,
help_text="True when a timer has been added for this notification",
null=True,
),
),
migrations.AlterField(
model_name="notification",
name="notification_id",
field=models.PositiveBigIntegerField(verbose_name="id"),
),
migrations.AlterField(
model_name="webhook",
name="notification_types",
field=multiselectfield.db.fields.MultiSelectField(
choices=[
("CharAppAcceptMsg", "Character joins corporation"),
("CharLeftCorpMsg", "Character leaves corporation"),
("MoonminingExtractionStarted", "Moonmining extraction started"),
("MoonminingLaserFired", "Moonmining laser fired"),
(
"MoonminingExtractionCancelled",
"Moonmining extraction cancelled",
),
("MoonminingExtractionFinished", "Moonmining extraction finished"),
(
"MoonminingAutomaticFracture",
"Moonmining automatic fracture triggered",
),
("StructureAnchoring", "Upwell structure anchoring"),
("StructureOnline", "Upwell structure went online"),
(
"StructureServicesOffline",
"Upwell structure services went offline",
),
("StructureWentHighPower", "Upwell structure went high power"),
("StructureWentLowPower", "Upwell structure went low power"),
("StructureUnanchoring", "Upwell structure unanchoring"),
("StructureFuelAlert", "Upwell structure fuel alert"),
("StructureUnderAttack", "Upwell structure is under attack"),
("StructureLostShields", "Upwell structure lost shields"),
("StructureLostArmor", "Upwell structure lost armor"),
("StructureDestroyed", "Upwell structure destroyed"),
("OwnershipTransferred", "Upwell structure ownership transferred"),
("OrbitalAttacked", "Customs office attacked"),
("OrbitalReinforced", "Customs office reinforced"),
("TowerAlertMsg", "Starbase attacked"),
("TowerResourceAlertMsg", "Starbase fuel alert"),
("EntosisCaptureStarted", "Sovereignty entosis capture started"),
(
"SovCommandNodeEventStarted",
"Sovereignty command node event started",
),
("SovAllClaimAquiredMsg", "Sovereignty DED claim acknowledgment"),
("SovStructureReinforced", "Sovereignty structure reinforced"),
("SovStructureDestroyed", "Sovereignty structure destroyed"),
("WarDeclared", "War declared"),
("AllyJoinedWarAggressorMsg", "War ally joined"),
("WarAdopted", "War adopted"),
("WarInherited", "War inherited"),
("CorpWarSurrenderMsg", "War party surrendered"),
("WarRetractedByConcord", "War retracted by Concord"),
],
default=[
"StructureAnchoring",
"StructureDestroyed",
"StructureFuelAlert",
"StructureLostArmor",
"StructureLostShields",
"StructureOnline",
"StructureServicesOffline",
"StructureUnderAttack",
"StructureWentHighPower",
"StructureWentLowPower",
"OrbitalAttacked",
"OrbitalReinforced",
"TowerAlertMsg",
"TowerResourceAlertMsg",
"SovStructureReinforced",
"SovStructureDestroyed",
],
help_text="select which type of notifications should be forwarded to this webhook",
max_length=704,
),
),
]
|
py
|
1a55b0c3bc9695eeb9f80163e12709e9a6b62b24
|
import json
import os
import pycspr
# A known casper test-net node address.
_NODE_ADDRESS = os.getenv("CASPER_NODE_ADDRESS", "3.136.227.9")
# A known block hash.
_BLOCK_HASH: bytes = bytes.fromhex("c7148e1e2e115d8fba357e04be2073d721847c982dc70d5c36b5f6d3cf66331c")
# A known block height.
_BLOCK_HEIGHT: int = 20652
def main():
"""Retrieves on-chain auction information.
"""
# Set client.
client: pycspr.NodeClient = pycspr.NodeClient(pycspr.NodeConnectionInfo(host=_NODE_ADDRESS))
# Set auction info scoped by current era.
auction_info_1: dict = client.queries.get_auction_info()
# Set auction info scoped by known hash.
auction_info_2: dict = client.queries.get_auction_info(_BLOCK_HASH)
# Set auction info scoped by known height.
auction_info_3: dict = client.queries.get_auction_info(_BLOCK_HEIGHT)
# Verify.
assert auction_info_1 != auction_info_2
assert auction_info_2 == auction_info_3
print("-----------------------------------------------------------------------------------------------------")
print(f"QUERIED TEST-NET NODE {_NODE_ADDRESS}")
print("-----------------------------------------------------------------------------------------------------")
print(f"Auction information = {json.dumps(auction_info_1, indent=4)}")
print("-----------------------------------------------------------------------------------------------------")
if __name__ == "__main__":
try:
main()
except Exception as err:
print(f"API ERROR @ NODE {_NODE_ADDRESS} :: {err}")
|
py
|
1a55b29843eeb6ec6bb0bc6355f9f5d0dc26fc41
|
from __future__ import absolute_import
from selenium import webdriver
from shishito.runtime.environment.shishito import ShishitoEnvironment
class ControlEnvironment(ShishitoEnvironment):
""" Local control environment. """
def get_capabilities(self, config_section):
""" Return dictionary of capabilities for specific config combination.
:param str config_section: section in platform/environment.properties config
:return: dict with capabilities
"""
get_opt = self.shishito_support.get_opt
default_capabilities = super().get_capabilities(config_section)
capabilities = {
'marionette': str(get_opt('firefox_marionette')).lower() == 'true',
}
return {**default_capabilities, **capabilities}
def start_driver(self, browser_type, capabilities, config_section=None):
""" Prepare selenium webdriver.
:param browser_type: type of browser for which prepare driver
:param capabilities: capabilities used for webdriver initialization
"""
# get browser profile
browser_profile = self.get_browser_profile(browser_type, capabilities, config_section)
# starts local browser
if browser_type == "firefox":
from selenium.webdriver.firefox.options import Options
firefox_options = Options()
for arg in self.get_browser_arguments(config_section):
firefox_options.add_argument(arg)
driver = webdriver.Firefox(browser_profile, desired_capabilities=capabilities,
firefox_options=firefox_options)
elif browser_type == "chrome":
driver = webdriver.Chrome(desired_capabilities=capabilities, chrome_options=browser_profile)
elif browser_type == "ie":
driver = webdriver.Ie(capabilities=capabilities)
elif browser_type == "phantomjs":
driver = webdriver.PhantomJS(desired_capabilities=capabilities)
elif browser_type == "opera":
driver = webdriver.Opera(desired_capabilities=capabilities)
# SafariDriver bindings for Python not yet implemented
# elif browser == "Safari":
# self.driver = webdriver.SafariDriver()
else:
raise ValueError('Unknown type of browser.')
return driver
def call_browser(self, config_section):
""" Start webdriver for given config section. Prepare capabilities for the browser, set browser resolution.
:param str config_section: section in platform/environment.properties config
:return: created webdriver
"""
# get browser capabilities
capabilities = self.get_capabilities(config_section)
# get browser type
browser_type = self.shishito_support.get_opt(config_section, 'browser').lower()
# get driver
driver = self.start_driver(browser_type, capabilities, config_section=config_section)
if browser_type.lower() == 'chrome':
self.set_download_path(driver)
        # set browser size if defined
browser_size = self.shishito_support.get_opt(config_section, 'resolution')
if browser_size:
# default size --> leave it on webdriver
width, height = browser_size.split('x')
driver.set_window_size(width, height)
return driver
|
py
|
1a55b34f854bc69a65b643c098b1f085b4a13a54
|
# stdlib
import sys
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
from google.protobuf.reflection import GeneratedProtocolMessageType
from nacl.signing import SigningKey
from nacl.signing import VerifyKey
import pandas as pd
# syft relative
from ....core.pointer.pointer import Pointer
from ....decorators import syft_decorator
from ....lib import create_lib_ast
from ....logger import critical
from ....logger import debug
from ....logger import error
from ....logger import traceback_and_raise
from ....proto.core.node.common.client_pb2 import Client as Client_PB
from ....proto.core.node.common.metadata_pb2 import Metadata as Metadata_PB
from ....util import get_fully_qualified_name
from ...common.message import EventualSyftMessageWithoutReply
from ...common.message import ImmediateSyftMessageWithReply
from ...common.message import ImmediateSyftMessageWithoutReply
from ...common.message import SignedEventualSyftMessageWithoutReply
from ...common.message import SignedImmediateSyftMessageWithReply
from ...common.message import SignedImmediateSyftMessageWithoutReply
from ...common.message import SyftMessage
from ...common.serde.deserialize import _deserialize
from ...common.uid import UID
from ...io.location import Location
from ...io.location import SpecificLocation
from ...io.route import Route
from ...io.route import SoloRoute
from ...io.virtual import VirtualClientConnection
from ...node.common.service.obj_search_service import ObjectSearchMessage
from ..abstract.node import AbstractNodeClient
from .action.exception_action import ExceptionMessage
from .service.child_node_lifecycle_service import RegisterChildNodeMessage
class Client(AbstractNodeClient):
"""Client is an incredibly powerful abstraction in Syft. We assume that,
no matter where a client is, it can figure out how to communicate with
the Node it is supposed to point to. If I send you a client I have
with all of the metadata in it, you should have all the information
you need to know to interact with a node (although you might not
have permissions - clients should not store private keys)."""
@syft_decorator(typechecking=True)
def __init__(
self,
name: Optional[str],
routes: List[Route],
network: Optional[Location] = None,
domain: Optional[Location] = None,
device: Optional[Location] = None,
vm: Optional[Location] = None,
signing_key: Optional[SigningKey] = None,
verify_key: Optional[VerifyKey] = None,
):
name = f"{name} Client" if name is not None else None
super().__init__(
name=name, network=network, domain=domain, device=device, vm=vm
)
self.routes = routes
self.default_route_index = 0
# create a signing key if one isn't provided
if signing_key is None:
self.signing_key = SigningKey.generate()
else:
self.signing_key = signing_key
# if verify key isn't provided, get verify key from signing key
if verify_key is None:
self.verify_key = self.signing_key.verify_key
else:
self.verify_key = verify_key
self.install_supported_frameworks()
self.store = StoreClient(client=self)
@property
def icon(self) -> str:
icon = "📡"
sub = []
if self.vm is not None:
sub.append("🍰")
if self.device is not None:
sub.append("📱")
if self.domain is not None:
sub.append("🏰")
if self.network is not None:
sub.append("🔗")
if len(sub) > 0:
icon = f"{icon} ["
for s in sub:
icon += s
icon += "]"
return icon
@staticmethod
def deserialize_client_metadata_from_node(
metadata: Metadata_PB,
) -> Tuple[SpecificLocation, str, UID]:
# string of bytes
meta = _deserialize(blob=metadata)
return meta.node, meta.name, meta.id
def install_supported_frameworks(self) -> None:
self.lib_ast = create_lib_ast(client=self)
# first time we want to register for future updates
self.lib_ast.register_updates(self)
if self.lib_ast is not None:
for attr_name, attr in self.lib_ast.attrs.items():
setattr(self, attr_name, attr)
# shortcut syft.lib.python to just python
if hasattr(self.lib_ast, "syft"):
try:
lib_attr = getattr(self.lib_ast.syft, "lib", None)
if lib_attr is not None:
python_attr = getattr(lib_attr, "python", None)
setattr(self, "python", python_attr)
except Exception as e:
critical(f"Failed to set python attribute on client. {e}")
def add_me_to_my_address(self) -> None:
traceback_and_raise(NotImplementedError)
@syft_decorator(typechecking=True)
def register_in_memory_client(self, client: AbstractNodeClient) -> None:
# WARNING: Gross hack
route_index = self.default_route_index
# this ID should be unique but persistent so that lookups are universal
route = self.routes[route_index]
if isinstance(route, SoloRoute):
connection = route.connection
if isinstance(connection, VirtualClientConnection):
connection.server.node.in_memory_client_registry[
client.address.target_id.id
] = client
else:
traceback_and_raise(
Exception(
"Unable to save client reference without VirtualClientConnection"
)
)
else:
traceback_and_raise(
Exception("Unable to save client reference without SoloRoute")
)
@syft_decorator(typechecking=True)
def register(self, client: AbstractNodeClient) -> None:
debug(f"> Registering {client.pprint} with {self.pprint}")
self.register_in_memory_client(client=client)
msg = RegisterChildNodeMessage(
lookup_id=client.id,
child_node_client_address=client.address,
address=self.address,
)
if self.network is not None:
client.network = (
self.network
if self.network is not None # type: ignore # nested "is not None"
else client.network
)
# QUESTION
# if the client is a network and the domain is not none this will set it
# on the network causing an exception
# but we can't check if the client is a NetworkClient here because
# this is a superclass of NetworkClient
# Remove: if self.domain is not None:
# then see the test line node_test.py:
# bob_network_client.register(client=bob_domain_client)
if self.domain is not None:
client.domain = (
self.domain
if self.domain is not None # type: ignore # nested "is not None"
else client.domain
)
if self.device is not None:
client.device = (
self.device
if self.device is not None # type: ignore # nested "is not None"
else client.device
)
assert self.device == client.device
if self.vm is not None:
client.vm = self.vm
self.send_immediate_msg_without_reply(msg=msg)
@property
def id(self) -> UID:
"""This client points to an node, this returns the id of that node."""
traceback_and_raise(NotImplementedError)
# TODO fix the msg type but currently tensor needs SyftMessage
@syft_decorator(typechecking=True)
def send_immediate_msg_with_reply(
self,
msg: Union[SignedImmediateSyftMessageWithReply, ImmediateSyftMessageWithReply],
route_index: int = 0,
) -> SyftMessage:
route_index = route_index or self.default_route_index
if isinstance(msg, ImmediateSyftMessageWithReply):
output = (
f"> {self.pprint} Signing {msg.pprint} with "
+ f"{self.key_emoji(key=self.signing_key.verify_key)}"
)
debug(output)
msg = msg.sign(signing_key=self.signing_key)
response = self.routes[route_index].send_immediate_msg_with_reply(msg=msg)
if response.is_valid:
# check if we have an ExceptionMessage to trigger a local exception
# from a remote exception that we caused
if isinstance(response.message, ExceptionMessage):
exception_msg = response.message
exception = exception_msg.exception_type(exception_msg.exception_msg)
error(str(exception))
traceback_and_raise(exception)
else:
return response.message
traceback_and_raise(
Exception("Response was signed by a fake key or was corrupted in transit.")
)
# TODO fix the msg type but currently tensor needs SyftMessage
@syft_decorator(typechecking=True)
def send_immediate_msg_without_reply(
self,
msg: Union[
SignedImmediateSyftMessageWithoutReply, ImmediateSyftMessageWithoutReply
],
route_index: int = 0,
) -> None:
route_index = route_index or self.default_route_index
if isinstance(msg, ImmediateSyftMessageWithoutReply):
output = (
f"> {self.pprint} Signing {msg.pprint} with "
+ f"{self.key_emoji(key=self.signing_key.verify_key)}"
)
debug(output)
msg = msg.sign(signing_key=self.signing_key)
debug(f"> Sending {msg.pprint} {self.pprint} ➡️ {msg.address.pprint}")
self.routes[route_index].send_immediate_msg_without_reply(msg=msg)
@syft_decorator(typechecking=True)
def send_eventual_msg_without_reply(
self, msg: EventualSyftMessageWithoutReply, route_index: int = 0
) -> None:
route_index = route_index or self.default_route_index
output = (
f"> {self.pprint} Signing {msg.pprint} with "
+ f"{self.key_emoji(key=self.signing_key.verify_key)}"
)
debug(output)
signed_msg: SignedEventualSyftMessageWithoutReply = msg.sign(
signing_key=self.signing_key
)
self.routes[route_index].send_eventual_msg_without_reply(msg=signed_msg)
@syft_decorator(typechecking=True)
def __repr__(self) -> str:
return f"<Client pointing to node with id:{self.id}>"
@syft_decorator(typechecking=True)
def register_route(self, route: Route) -> None:
self.routes.append(route)
@syft_decorator(typechecking=True)
def set_default_route(self, route_index: int) -> None:
self.default_route = route_index
@syft_decorator(typechecking=True)
def _object2proto(self) -> Client_PB:
obj_type = get_fully_qualified_name(obj=self)
routes = [route.serialize() for route in self.routes]
network = self.network._object2proto() if self.network is not None else None
domain = self.domain._object2proto() if self.domain is not None else None
device = self.device._object2proto() if self.device is not None else None
vm = self.vm._object2proto() if self.vm is not None else None
client_pb = Client_PB(
obj_type=obj_type,
id=self.id.serialize(),
name=self.name,
routes=routes,
has_network=self.network is not None,
network=network,
has_domain=self.domain is not None,
domain=domain,
has_device=self.device is not None,
device=device,
has_vm=self.vm is not None,
vm=vm,
)
return client_pb
@staticmethod
def _proto2object(proto: Client_PB) -> "Client":
module_parts = proto.obj_type.split(".")
klass = module_parts.pop()
obj_type = getattr(sys.modules[".".join(module_parts)], klass)
network = (
SpecificLocation._proto2object(proto.network) if proto.has_network else None
)
domain = (
SpecificLocation._proto2object(proto.domain) if proto.has_domain else None
)
device = (
SpecificLocation._proto2object(proto.device) if proto.has_device else None
)
vm = SpecificLocation._proto2object(proto.vm) if proto.has_vm else None
routes = [SoloRoute._proto2object(route) for route in proto.routes]
obj = obj_type(
name=proto.name,
routes=routes,
network=network,
domain=domain,
device=device,
vm=vm,
)
if type(obj) != obj_type:
traceback_and_raise(
TypeError(
f"Deserializing Client. Expected type {obj_type}. Got {type(obj)}"
)
)
return obj
@staticmethod
def get_protobuf_schema() -> GeneratedProtocolMessageType:
return Client_PB
@property
def keys(self) -> str:
verify = (
self.key_emoji(key=self.signing_key.verify_key)
if self.signing_key is not None
else "🚫"
)
keys = f"🔑 {verify}"
return keys
class StoreClient:
def __init__(self, client: Client) -> None:
self.client = client
@property
def store(self) -> List[Pointer]:
msg = ObjectSearchMessage(
address=self.client.address, reply_to=self.client.address
)
results = self.client.send_immediate_msg_with_reply(msg=msg).results
# This is because of a current limitation in Pointer where we cannot
# serialize a client object. TODO: Fix limitation in Pointer so that we don't need this.
for result in results:
result.gc_enabled = False
result.client = self.client
return results
def __len__(self) -> int:
"""Return the number of items in the object store we're allowed to know about"""
return len(self.store)
def __getitem__(self, key: Union[str, int]) -> Pointer:
if isinstance(key, str):
matches = 0
match_obj: Optional[Pointer] = None
for obj in self.store:
if key in str(obj.id_at_location.value).replace("-", ""):
return obj
if key in obj.tags:
matches += 1
match_obj = obj
if matches == 1 and match_obj is not None:
return match_obj
elif matches > 1:
traceback_and_raise(KeyError("More than one item with tag:" + str(key)))
traceback_and_raise(KeyError("No such request found for id:" + str(key)))
if isinstance(key, int):
return self.store[key]
else:
traceback_and_raise(KeyError("Please pass in a string or int key"))
def __repr__(self) -> str:
return repr(self.store)
@property
def pandas(self) -> pd.DataFrame:
obj_lines: List[Dict[str, Any]] = list()
for obj in self.store:
obj_lines.append(
{
"ID": obj.id_at_location,
"Tags": obj.tags,
"Description": obj.description,
"object_type": obj.object_type,
}
)
return pd.DataFrame(obj_lines)
|
py
|
1a55b36c4dac293e0f8254c4aecfff5e0373bcca
|
#!/usr/bin/env python3
"""websocket cmd client for wssrv.py example."""
import argparse
import asyncio
import signal
import sys
import aiohttp
async def start_client(loop: asyncio.AbstractEventLoop, url: str) -> None:
name = input("Please enter your name: ")
# input reader
def stdin_callback() -> None:
line = sys.stdin.buffer.readline().decode("utf-8")
if not line:
loop.stop()
else:
            # send_str is a coroutine in aiohttp 3.x, so schedule it on the loop rather
            # than calling it synchronously (which would only create an un-awaited coroutine)
            loop.create_task(ws.send_str(name + ": " + line))
loop.add_reader(sys.stdin.fileno(), stdin_callback)
async def dispatch() -> None:
while True:
msg = await ws.receive()
if msg.type == aiohttp.WSMsgType.TEXT:
print("Text: ", msg.data.strip())
elif msg.type == aiohttp.WSMsgType.BINARY:
print("Binary: ", msg.data)
elif msg.type == aiohttp.WSMsgType.PING:
await ws.pong()
elif msg.type == aiohttp.WSMsgType.PONG:
print("Pong received")
else:
if msg.type == aiohttp.WSMsgType.CLOSE:
await ws.close()
elif msg.type == aiohttp.WSMsgType.ERROR:
print("Error during receive %s" % ws.exception())
elif msg.type == aiohttp.WSMsgType.CLOSED:
pass
break
# send request
async with aiohttp.ClientSession() as client:
async with client.ws_connect(url, autoclose=False, autoping=False) as ws:
await dispatch()
ARGS = argparse.ArgumentParser(
description="websocket console client for wssrv.py example."
)
ARGS.add_argument(
"--host", action="store", dest="host", default="127.0.0.1", help="Host name"
)
ARGS.add_argument(
"--port", action="store", dest="port", default=8080, type=int, help="Port number"
)
if __name__ == "__main__":
args = ARGS.parse_args()
if ":" in args.host:
args.host, port = args.host.split(":", 1)
args.port = int(port)
url = f"http://{args.host}:{args.port}"
loop = asyncio.get_event_loop()
loop.add_signal_handler(signal.SIGINT, loop.stop)
loop.create_task(start_client(loop, url))
loop.run_forever()
|
py
|
1a55b3896777324ce82a5bc611a9231289060163
|
bole = True  # flag defined but never used
n = 0
while n <= 30:
    n = n + 1
    print(f'ola turma{n}')  # Portuguese: "hello class {n}"
    break  # exits the loop after the first iteration, so only 'ola turma1' is printed
print('passou')  # Portuguese: "passed" / "it got here"
|
py
|
1a55b4080af739125fd701fe8ecfb44c65fe7dd0
|
from setuptools import setup, find_packages
setup(
name='vid2slides',
version='0.0.4',
description='Extract a slideshow from a video presentation',
url='https://github.com/lukew3/vid2slides',
author='Luke Weiler',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
install_requires=['click', 'Pillow'],
entry_points={
'console_scripts': ['vid2slides=vid2slides.cli:cli'],
},
)
|
py
|
1a55b4f8084a38de5617d36961b992fef0f3cb1f
|
import os
import signal
import psutil
from rest.api.loghelpers.message_dumper import MessageDumper
from rest.service.fluentd import Fluentd
class ProcessUtils:
def __init__(self, logger):
self.logger = logger
self.fluentd_utils = Fluentd(logger)
self.message_dumper = MessageDumper()
def on_terminate(self, proc):
self.fluentd_utils.emit(tag="process", msg=self.message_dumper.dump_message(
{"proc": str(proc), "returncode": proc.returncode}))
def log_process_err(self, proc, err=""):
self.fluentd_utils.emit(tag="process", msg=self.message_dumper.dump_message(
{"proc": str(proc), "error": err}))
@staticmethod
def find_procs_by_name(name):
""" Return a list of processes matching 'name' """
ls = []
for p in psutil.process_iter(["name", "exe", "cmdline"]):
if name == p.info['name'] or \
p.info['exe'] and os.path.basename(p.info['exe']) == name or \
p.info['cmdline'] and p.info['cmdline'][0] == name:
ls.append(p)
return ls
def kill_proc_tree(self, pid=os.getpid(), sig=signal.SIGTERM, include_parent=True, timeout=5):
"""Kill a process tree (including grandchildren) with signal
"sig" and return a (gone, still_alive) tuple.
"on_terminate", if specified, is a callback function which is
called as soon as a child terminates.
"""
if pid == os.getpid():
include_parent = False
parent = psutil.Process(pid=pid)
children = parent.children(recursive=True)
if include_parent:
children.append(parent)
for p in children:
try:
p.send_signal(sig)
except Exception as e:
self.log_process_err(proc=p, err=e.__str__())
gone, alive = psutil.wait_procs(children, timeout=timeout, callback=self.on_terminate)
return gone, alive
|
py
|
1a55b677c8fb2d289a6687b6a7c3ea12868b5871
|
import ncvis
vis = ncvis.NCVis(n_neighbors=15, M=16, ef_construction=200, n_init_epochs=20, n_epochs=50, min_dist=0.4, n_threads=-1, distance='euclidean')
|
py
|
1a55b6d37a8a463f0f25d42676490302e63b9182
|
import discord
import sqlite3
import re
from datetime import datetime
from discord import Message, TextChannel, Member, PartialEmoji
from discord.ext import commands
class Music(commands.Cog, name="Please don't stop the music"):
def __init__(self, client):
self.client = client
@commands.command(aliases=['history'])
@commands.is_owner()
async def gethistory(self, ctx):
count = 0
async for message in ctx.channel.history(limit = None, oldest_first = True):
# if message.author == self.client.user or message.channel.id != 399477609559490560:
# print('returned')
# return
if 'htt' in message.content and message.author != self.client.user :
main = sqlite3.connect('music.db')
cursor = main.cursor()
sql = ("INSERT or IGNORE INTO links(link, date_posted, author, jump_link, count) VALUES(?,?,?,?,?)")
val = (str(message.content), str(message.created_at.date()), str(message.author), str(message.jump_url), count)
cursor.execute(sql, val)
main.commit()
cursor.close()
main.close()
else:
pass
@commands.Cog.listener()
async def on_message(self, message:discord.Message):
if message.author == self.client.user:
return
else:
main = sqlite3.connect('music.db')
cursor = main.cursor()
if message.channel.id == 399477609559490560 and 'http' in message.content:
cursor.execute('SELECT link, date_posted, author, jump_link FROM links WHERE link LIKE ?', (message.content,))
result = cursor.fetchone()
if not result:
sql = ("INSERT INTO links(link, date_posted, author, jump_link) VALUES(?,?,?,?)")
val = (str(message.content), str(message.created_at.date()), str(message.author), str(message.jump_url))
cursor.execute(sql, val)
else:
embed = discord.Embed(colour = 0x7ed321, description = "This song/link was already posted!")
embed.timestamp = datetime.utcnow()
embed.set_author(name="Jungle Jive")
embed.set_footer(text=f'{self.client.user.name}', icon_url=f'{self.client.user.avatar_url}')
embed.add_field(name="Original Poster", value=f"{result[2]}")
embed.add_field(name="Link to original post", value=f"[Click here]({result[3]})")
embed.add_field(name="Date of original post", value=f"{result[1]}")
await message.channel.send(embed=embed)
main.commit()
cursor.close()
main.close()
@commands.command(aliases=['musicchat', 'chat'])
async def music_chat(self, ctx):
await ctx.send(file=discord.File('music_channel.png'))
def setup(client):
client.add_cog(Music(client))
print('Music Cog loaded')
|
py
|
1a55b9177615c691eb51445b6ae9926809db380a
|
# -*- coding: utf-8 -*-
"""
simulation script for benchmark data
"""
#%%
import sys
import os
sys.path.insert(0, ".." + os.sep + ".." + os.sep)
from benchmarking.benchmarking_tools import SurfaceCodeBenchmarkingTool
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import pauli_error, depolarizing_error
from surface_code.fitters import GraphDecoder
from surface_code.circuits import SurfaceCodeLogicalQubit
from qiskit import QuantumCircuit, execute, QuantumRegister, ClassicalRegister, Aer
from tqdm import tqdm
import multiprocessing as mp
# Noise Model Function
def get_noise_model(p_err):
error_gate1 = pauli_error([("X", p_err / 2), ("Z", p_err / 2), ("I", 1 - p_err)])
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(error_gate1, "id")
return noise_model
if __name__ == "__main__":
decoder_keys = [(d, 1) for d in range(3, 11, 2)]
benchmarking_tools = []
for decoder_key in tqdm(decoder_keys):
d = decoder_key[0]
T = decoder_key[1]
qubit = SurfaceCodeLogicalQubit(d)
qubit.stabilize()
qubit.identity_data()
qubit.stabilize()
qubit.readout_z()
benchmarking_tools.append(
SurfaceCodeBenchmarkingTool(
decoder=GraphDecoder(d=d, T=T),
readout_circuit=qubit,
noise_model_func=get_noise_model,
)
)
print("\nDONE SETTING UP DECODERS!\n")
for benchmarking_tool in benchmarking_tools:
print(
"\nSIMULATE: (d={},T={})\n".format(benchmarking_tool.d, benchmarking_tool.T)
)
correct_logical_value = 0
noise_values = [
5e-5,
1e-4,
2e-4,
5e-4,
1e-3,
2e-3,
4e-3,
5e-3,
6e-3,
7e-3,
8e-3,
9e-3,
1e-2,
2e-2,
]
benchmarking_tool.simulate_readout_mp(
correct_logical_value=correct_logical_value, noise_values=noise_values
)
# %%
|
py
|
1a55b9cb3df0051caf721460a1200a20de07fcd8
|
import requests
from bs4 import BeautifulSoup
import pytest
import time
import hashlib
def find_playstation_price(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
price = soup.find(class_='psw-t-title-m').text
return price
def find_apple_store_price(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
price = soup.find_all(
class_='inline-list__item inline-list__item--bulleted app-header__list__item--price')[0].get_text()
return price
def find_pioneer_firmware_versions(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
price_str = soup.find(class_='version').text
price_arr = price_str.split()
price = price_arr[1]
return price
def find_wacom_price(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
price = soup.find(class_='price').text
return price
def check_logsdon(url):
response = requests.get(url)
soup = BeautifulSoup(response.text, 'html.parser')
resp = soup.find(class_='_1Cfot')
hashed = hashlib.md5(str(resp).encode())
return hashed.hexdigest()
class Common:
def setup(self):
pass
def teardown(self):
time.sleep(5) # Sleep for 5 seconds
class TestPioneer(Common):
def test_pioneer_software_download_updated_firmware(self):
url = 'https://www.pioneerelectronics.com/PUSA/Car/NEX/DMH-W4600NEX'
expected_firmware = '1.31'
dmh_4600_next_fw = find_pioneer_firmware_versions(url)
assert expected_firmware in dmh_4600_next_fw
class TestLogsdon(Common):
def test_logsdon_website_for_any_changes(self):
url = 'https://www.farmhousebeer.com/'
expected_hashes = ['6adf97f83acf6453d4a6a4b1070f3754', '534952d2d7451c0709c8d0263a50005f']
actual_hash = check_logsdon(url)
assert actual_hash in expected_hashes
class TestPlaystation(Common):
def test_actraiser_renaissance(self):
url = 'https://store.playstation.com/en-us/product/UP0082-CUSA25035_00-ACTPS4APPNA00001/'
expected_price = '$29.99'
price = find_playstation_price(url)
assert expected_price == price
|
py
|
1a55bab1d2721a86cfb6364183df1f22a0099c99
|
import os
import threading
import queue
import asyncio
def convert_video(Q,file):
if not Q.empty():
async def covert_720p():
os.system('ffmpeg -i ' + file + ' -r 30 -b 2M -s 1280x720 ' + file + '_720.mp4')
print(threading.currentThread())
return '720P covert successfully'
async def covert_480p():
os.system('ffmpeg -i ' + file + ' -r 30 -b 1M -s 720x480 ' + file + '_480.mp4')
print(threading.currentThread())
return '480P covert successfully'
coroutine1 = covert_720p()
coroutine2 = covert_480p()
thread_list = [
asyncio.ensure_future(coroutine1),
asyncio.ensure_future(coroutine2),
]
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait(thread_list))
for thread in thread_list:
print('thread: ', thread.result())
def main():
Q = queue.Queue()
    path = r'D:\EC500\Exercise2'
    for file in os.listdir(path):
        if file.endswith('.mp4'):
            # pass the full path so ffmpeg can find the file regardless of the cwd
            full_path = os.path.join(path, file)
            Q.put(full_path)
            convert_video(Q, full_path)
if __name__ == '__main__':
main()
|
py
|
1a55bc26d24a3d83e804258e44c5f9c41ad03e0e
|
import click
import py42.sdk.queries.alerts.filters as f
from py42.exceptions import Py42NotFoundError
from py42.sdk.queries.alerts.alert_query import AlertQuery
from py42.sdk.queries.alerts.filters import AlertState
from py42.sdk.queries.alerts.filters import RuleType
from py42.sdk.queries.alerts.filters import Severity
from py42.util import format_dict
import code42cli.cmds.search.options as searchopt
import code42cli.errors as errors
import code42cli.options as opt
from code42cli.bulk import generate_template_cmd_factory
from code42cli.bulk import run_bulk_process
from code42cli.click_ext.groups import OrderedGroup
from code42cli.cmds.search import SendToCommand
from code42cli.cmds.search.cursor_store import AlertCursorStore
from code42cli.cmds.search.options import server_options
from code42cli.cmds.util import convert_to_or_query
from code42cli.cmds.util import create_time_range_filter
from code42cli.cmds.util import try_get_default_header
from code42cli.date_helper import convert_datetime_to_timestamp
from code42cli.date_helper import limit_date_range
from code42cli.enums import JsonOutputFormat
from code42cli.enums import OutputFormat
from code42cli.file_readers import read_csv_arg
from code42cli.options import format_option
from code42cli.output_formats import OutputFormatter
from code42cli.util import hash_event
from code42cli.util import parse_timestamp
from code42cli.util import warn_interrupt
ALERTS_KEYWORD = "alerts"
ALERT_PAGE_SIZE = 25
begin = opt.begin_option(
ALERTS_KEYWORD,
callback=lambda ctx, param, arg: convert_datetime_to_timestamp(
limit_date_range(arg, max_days_back=90)
),
)
end = opt.end_option(ALERTS_KEYWORD)
checkpoint = opt.checkpoint_option(ALERTS_KEYWORD)
advanced_query = searchopt.advanced_query_option(ALERTS_KEYWORD)
severity_option = click.option(
"--severity",
multiple=True,
type=click.Choice(Severity.choices()),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.Severity),
help="Filter alerts by severity. Defaults to returning all severities.",
)
filter_state_option = click.option(
"--state",
multiple=True,
type=click.Choice(AlertState.choices()),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.AlertState),
help="Filter alerts by status. Defaults to returning all statuses.",
)
actor_option = click.option(
"--actor",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.Actor),
help="Filter alerts by including the given actor(s) who triggered the alert. "
"Arguments must match the actor's cloud alias exactly.",
)
actor_contains_option = click.option(
"--actor-contains",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.contains_filter(f.Actor),
help="Filter alerts by including actor(s) whose cloud alias contains the given string.",
)
exclude_actor_option = click.option(
"--exclude-actor",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_in_filter(f.Actor),
help="Filter alerts by excluding the given actor(s) who triggered the alert. "
"Arguments must match actor's cloud alias exactly.",
)
exclude_actor_contains_option = click.option(
"--exclude-actor-contains",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_contains_filter(f.Actor),
help="Filter alerts by excluding actor(s) whose cloud alias contains the given string.",
)
rule_name_option = click.option(
"--rule-name",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.RuleName),
help="Filter alerts by including the given rule name(s).",
)
exclude_rule_name_option = click.option(
"--exclude-rule-name",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_in_filter(f.RuleName),
help="Filter alerts by excluding the given rule name(s).",
)
rule_id_option = click.option(
"--rule-id",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.RuleId),
help="Filter alerts by including the given rule id(s).",
)
exclude_rule_id_option = click.option(
"--exclude-rule-id",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_in_filter(f.RuleId),
help="Filter alerts by excluding the given rule id(s).",
)
rule_type_option = click.option(
"--rule-type",
multiple=True,
type=click.Choice(RuleType.choices()),
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.is_in_filter(f.RuleType),
help="Filter alerts by including the given rule type(s).",
)
exclude_rule_type_option = click.option(
"--exclude-rule-type",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.not_in_filter(f.RuleType),
help="Filter alerts by excluding the given rule type(s).",
)
description_option = click.option(
"--description",
multiple=True,
cls=searchopt.AdvancedQueryAndSavedSearchIncompatible,
callback=searchopt.contains_filter(f.Description),
help="Filter alerts by description. Does fuzzy search by default.",
)
send_to_format_options = click.option(
"-f",
"--format",
type=click.Choice(JsonOutputFormat(), case_sensitive=False),
help="The output format of the result. Defaults to json format.",
default=JsonOutputFormat.RAW,
)
alert_id_arg = click.argument("alert-id")
note_option = click.option("--note", help="A note to attach to the alert.")
update_state_option = click.option(
"--state",
help="The state to give to the alert.",
type=click.Choice(AlertState.choices()),
)
def _get_default_output_header():
return {
"id": "Id",
"name": "RuleName",
"actor": "Username",
"createdAt": "ObservedDate",
"state": "State",
"severity": "Severity",
"description": "Description",
}
def search_options(f):
f = checkpoint(f)
f = advanced_query(f)
f = end(f)
f = begin(f)
return f
def filter_options(f):
f = actor_option(f)
f = actor_contains_option(f)
f = exclude_actor_option(f)
f = exclude_actor_contains_option(f)
f = rule_name_option(f)
f = exclude_rule_name_option(f)
f = rule_id_option(f)
f = exclude_rule_id_option(f)
f = rule_type_option(f)
f = exclude_rule_type_option(f)
f = description_option(f)
f = severity_option(f)
f = filter_state_option(f)
return f
@click.group(cls=OrderedGroup)
@opt.sdk_options(hidden=True)
def alerts(state):
"""Get and send alert data."""
# store cursor getter on the group state so shared --begin option can use it in validation
state.cursor_getter = _get_alert_cursor_store
@alerts.command()
@click.argument("checkpoint-name")
@opt.sdk_options()
def clear_checkpoint(state, checkpoint_name):
"""Remove the saved alert checkpoint from `--use-checkpoint/-c` mode."""
_get_alert_cursor_store(state.profile.name).delete(checkpoint_name)
@alerts.command()
@filter_options
@search_options
@click.option(
"--or-query", is_flag=True, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible
)
@opt.sdk_options()
@click.option(
"--include-all",
default=False,
is_flag=True,
help="Display simple properties of the primary level of the nested response.",
)
@format_option
def search(
cli_state,
format,
begin,
end,
advanced_query,
use_checkpoint,
or_query,
include_all,
**kwargs,
):
"""Search for alerts."""
output_header = try_get_default_header(
include_all, _get_default_output_header(), format
)
formatter = OutputFormatter(format, output_header)
cursor = _get_alert_cursor_store(cli_state.profile.name) if use_checkpoint else None
if use_checkpoint:
checkpoint_name = use_checkpoint
checkpoint = cursor.get(checkpoint_name)
if checkpoint is not None:
begin = checkpoint
query = _construct_query(cli_state, begin, end, advanced_query, or_query)
alerts_gen = cli_state.sdk.alerts.get_all_alert_details(query)
if use_checkpoint:
checkpoint_name = use_checkpoint
# update checkpoint to alertId of last event retrieved
alerts_gen = _dedupe_checkpointed_events_and_store_updated_checkpoint(
cursor, checkpoint_name, alerts_gen
)
alerts_list = []
for alert in alerts_gen:
alerts_list.append(alert)
if not alerts_list:
click.echo("No results found.")
return
formatter.echo_formatted_list(alerts_list)
def _construct_query(state, begin, end, advanced_query, or_query):
if advanced_query:
state.search_filters = advanced_query
else:
if begin or end:
state.search_filters.append(
create_time_range_filter(f.DateObserved, begin, end)
)
if or_query:
state.search_filters = convert_to_or_query(state.search_filters)
query = AlertQuery(*state.search_filters)
query.page_size = ALERT_PAGE_SIZE
query.sort_direction = "asc"
query.sort_key = "CreatedAt"
return query
def _dedupe_checkpointed_events_and_store_updated_checkpoint(
cursor, checkpoint_name, alerts_gen
):
"""De-duplicates events across checkpointed runs. Since using the timestamp of the last event
processed as the `--begin` time of the next run causes the last event to show up again in the
next results, we hash the last event(s) of each run and store those hashes in the cursor to
filter out on the next run. It's also possible that two events have the exact same timestamp, so
`checkpoint_events` needs to be a list of hashes so we can filter out everything that's actually
been processed.
"""
checkpoint_alerts = cursor.get_alerts(checkpoint_name)
new_timestamp = None
new_alerts = []
for alert in alerts_gen:
event_hash = hash_event(alert)
if event_hash not in checkpoint_alerts:
if alert[f.DateObserved._term] != new_timestamp:
new_timestamp = alert[f.DateObserved._term]
new_alerts.clear()
new_alerts.append(event_hash)
yield alert
ts = parse_timestamp(new_timestamp)
cursor.replace(checkpoint_name, ts)
cursor.replace_alerts(checkpoint_name, new_alerts)
@alerts.command(cls=SendToCommand)
@filter_options
@search_options
@click.option(
"--or-query", is_flag=True, cls=searchopt.AdvancedQueryAndSavedSearchIncompatible
)
@opt.sdk_options()
@server_options
@click.option(
"--include-all",
default=False,
is_flag=True,
help="Display simple properties of the primary level of the nested response.",
)
@send_to_format_options
def send_to(cli_state, begin, end, advanced_query, use_checkpoint, or_query, **kwargs):
"""Send alerts to the given server address.
HOSTNAME format: address:port where port is optional and defaults to 514.
"""
cursor = _get_cursor(cli_state, use_checkpoint)
if use_checkpoint:
checkpoint_name = use_checkpoint
checkpoint = cursor.get(checkpoint_name)
if checkpoint is not None:
begin = checkpoint
query = _construct_query(cli_state, begin, end, advanced_query, or_query)
alerts_gen = cli_state.sdk.alerts.get_all_alert_details(query)
if use_checkpoint:
checkpoint_name = use_checkpoint
alerts_gen = _dedupe_checkpointed_events_and_store_updated_checkpoint(
cursor, checkpoint_name, alerts_gen
)
with warn_interrupt():
alert = None
for alert in alerts_gen:
cli_state.logger.info(alert)
if alert is None: # generator was empty
click.echo("No results found.")
def _get_cursor(state, use_checkpoint):
return _get_alert_cursor_store(state.profile.name) if use_checkpoint else None
def _get_alert_cursor_store(profile_name):
return AlertCursorStore(profile_name)
@alerts.command()
@opt.sdk_options()
@alert_id_arg
@click.option(
"--include-observations", is_flag=True, help="View observations of the alert."
)
def show(state, alert_id, include_observations):
"""Display the details of a single alert."""
formatter = OutputFormatter(OutputFormat.TABLE, _get_default_output_header())
try:
response = state.sdk.alerts.get_details(alert_id)
except Py42NotFoundError:
raise errors.Code42CLIError(f"No alert found with ID '{alert_id}'.")
alert = response["alerts"][0]
formatter.echo_formatted_list([alert])
# Show note details
note = alert.get("note")
if note:
click.echo("\nNote:\n")
click.echo(format_dict(note))
if include_observations:
observations = alert.get("observations")
if observations:
click.echo("\nObservations:\n")
click.echo(format_dict(observations))
else:
click.echo("\nNo observations found.")
@alerts.command()
@opt.sdk_options()
@alert_id_arg
@update_state_option
@note_option
def update(cli_state, alert_id, state, note):
"""Update alert information."""
_update_alert(cli_state.sdk, alert_id, state, note)
@alerts.group(cls=OrderedGroup)
@opt.sdk_options(hidden=True)
def bulk(state):
"""Tools for executing bulk alert actions."""
pass
UPDATE_ALERT_CSV_HEADERS = ["id", "state", "note"]
update_alerts_generate_template = generate_template_cmd_factory(
group_name=ALERTS_KEYWORD,
commands_dict={"update": UPDATE_ALERT_CSV_HEADERS},
help_message="Generate the CSV template needed for bulk alert commands.",
)
bulk.add_command(update_alerts_generate_template)
@bulk.command(
name="update",
help=f"Bulk update alerts using a CSV file with format: {','.join(UPDATE_ALERT_CSV_HEADERS)}",
)
@opt.sdk_options()
@read_csv_arg(headers=UPDATE_ALERT_CSV_HEADERS)
def bulk_update(cli_state, csv_rows):
"""Bulk update alerts."""
sdk = cli_state.sdk
def handle_row(id, state, note):
_update_alert(sdk, id, state, note)
run_bulk_process(
handle_row, csv_rows, progress_label="Updating alerts:",
)
def _update_alert(sdk, alert_id, alert_state, note):
if alert_state:
sdk.alerts.update_state(alert_state, [alert_id], note=note)
elif note:
sdk.alerts.update_note(alert_id, note)
|
py
|
1a55bca72bf3a7d4416cbcb4ab53b53f11524b5f
|
# coding: utf-8
"""
CloudEndure API documentation
© 2017 CloudEndure All rights reserved # General Request authentication in CloudEndure's API is done using session cookies. A session cookie is returned upon successful execution of the \"login\" method. This value must then be provided within the request headers of all subsequent API requests. ## Errors Some errors are not specifically written in every method since they may always return. Those are: 1) 401 (Unauthorized) - for unauthenticated requests. 2) 405 (Method Not Allowed) - for using a method that is not supported (POST instead of GET). 3) 403 (Forbidden) - request is authenticated, but the user is not allowed to access. 4) 422 (Unprocessable Entity) - for invalid input. ## Formats All strings with date-time format are according to RFC3339. All strings with \"duration\" format are according to ISO8601. For example, a full day duration can be specified with \"PNNNND\". # noqa: E501
OpenAPI spec version: 5
Contact: https://bit.ly/2T54hSc
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
from cloudendure import cloudendure_api
from cloudendure.cloudendure_api.rest import ApiException
from cloudendure.cloudendure_api.models.cloud_endure_account_request import CloudEndureAccountRequest  # noqa: E501
class TestCloudEndureAccountRequest(unittest.TestCase):
"""CloudEndureAccountRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testCloudEndureAccountRequest(self):
"""Test CloudEndureAccountRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = cloudendure_api.models.cloud_endure_account_request.CloudEndureAccountRequest() # noqa: E501
pass
if __name__ == "__main__":
unittest.main()
|
py
|
1a55bcda03c90a7ea7873cefaeb8219fef138986
|
"""
Functions for creating and restoring url-safe signed JSON objects.
The format used looks like this:
>>> signing.dumps("hello")
'ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk'
There are two components here, separated by a ':'. The first component is a
URLsafe base64 encoded JSON of the object passed to dumps(). The second
component is a base64 encoded hmac/SHA1 hash of "$first_component:$secret"
signing.loads(s) checks the signature and returns the deserialized object.
If the signature fails, a BadSignature exception is raised.
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk")
'hello'
>>> signing.loads("ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified")
...
BadSignature: Signature failed: ImhlbGxvIg:1QaUZC:YIye-ze3TTx7gtSv422nZA4sgmk-modified
You can optionally compress the JSON prior to base64 encoding it to save
space, using the compress=True argument. This checks if compression actually
helps and only applies compression if the result is a shorter string:
>>> signing.dumps(range(1, 20), compress=True)
'.eJwFwcERACAIwLCF-rCiILN47r-GyZVJsNgkxaFxoDgxcOHGxMKD_T7vhAml:1QaUaL:BA0thEZrp4FQVXIXuOvYJtLJSrQ'
The fact that the string is compressed is signalled by the prefixed '.' at the
start of the base64 JSON.
There are 65 url-safe characters: the 64 used by url-safe base64 and the ':'.
These functions make use of all of them.
"""
import base64
import datetime
import json
import re
import time
import zlib
from django.conf import settings
from django.utils import baseconv
from django.utils.crypto import constant_time_compare, salted_hmac
from django.utils.encoding import force_bytes
from django.utils.module_loading import import_string
_SEP_UNSAFE = re.compile(r'^[A-z0-9-_=]*$')
class BadSignature(Exception):
"""Signature does not match."""
pass
class SignatureExpired(BadSignature):
"""Signature timestamp is older than required max_age."""
pass
def b64_encode(s):
return base64.urlsafe_b64encode(s).strip(b'=')
def b64_decode(s):
pad = b'=' * (-len(s) % 4)
return base64.urlsafe_b64decode(s + pad)
def base64_hmac(salt, value, key):
return b64_encode(salted_hmac(salt, value, key).digest()).decode()
def get_cookie_signer(salt='django.core.signing.get_cookie_signer'):
Signer = import_string(settings.SIGNING_BACKEND)
key = force_bytes(settings.SECRET_KEY) # SECRET_KEY may be str or bytes.
return Signer(b'django.http.cookies' + key, salt=salt)
class JSONSerializer:
"""
Simple wrapper around json to be used in signing.dumps and
signing.loads.
"""
def dumps(self, obj):
return json.dumps(obj, separators=(',', ':')).encode('latin-1')
def loads(self, data):
return json.loads(data.decode('latin-1'))
def dumps(obj, key=None, salt='django.core.signing', serializer=JSONSerializer, compress=False):
"""
Return URL-safe, hmac/SHA1 signed base64 compressed JSON string. If key is
None, use settings.SECRET_KEY instead.
If compress is True (not the default), check if compressing using zlib can
save some space. Prepend a '.' to signify compression. This is included
in the signature, to protect against zip bombs.
Salt can be used to namespace the hash, so that a signed string is
only valid for a given namespace. Leaving this at the default
value or re-using a salt value across different parts of your
application without good cause is a security risk.
The serializer is expected to return a bytestring.
"""
data = serializer().dumps(obj)
# Flag for if it's been compressed or not
is_compressed = False
if compress:
# Avoid zlib dependency unless compress is being used
compressed = zlib.compress(data)
if len(compressed) < (len(data) - 1):
data = compressed
is_compressed = True
base64d = b64_encode(data).decode()
if is_compressed:
base64d = '.' + base64d
return TimestampSigner(key, salt=salt).sign(base64d)
def loads(s, key=None, salt='django.core.signing', serializer=JSONSerializer, max_age=None):
"""
Reverse of dumps(), raise BadSignature if signature fails.
The serializer is expected to accept a bytestring.
"""
# TimestampSigner.unsign() returns str but base64 and zlib compression
# operate on bytes.
base64d = TimestampSigner(key, salt=salt).unsign(s, max_age=max_age).encode()
decompress = base64d[:1] == b'.'
if decompress:
# It's compressed; uncompress it first
base64d = base64d[1:]
data = b64_decode(base64d)
if decompress:
data = zlib.decompress(data)
return serializer().loads(data)
class Signer:
def __init__(self, key=None, sep=':', salt=None):
# Use of native strings in all versions of Python
self.key = key or settings.SECRET_KEY
self.sep = sep
if _SEP_UNSAFE.match(self.sep):
raise ValueError(
'Unsafe Signer separator: %r (cannot be empty or consist of '
'only A-z0-9-_=)' % sep,
)
self.salt = salt or '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
def signature(self, value):
return base64_hmac(self.salt + 'signer', value, self.key)
def sign(self, value):
return '%s%s%s' % (value, self.sep, self.signature(value))
def unsign(self, signed_value):
if self.sep not in signed_value:
raise BadSignature('No "%s" found in value' % self.sep)
value, sig = signed_value.rsplit(self.sep, 1)
if constant_time_compare(sig, self.signature(value)):
return value
raise BadSignature('Signature "%s" does not match' % sig)
class TimestampSigner(Signer):
def timestamp(self):
return baseconv.base62.encode(int(time.time()))
def sign(self, value):
value = '%s%s%s' % (value, self.sep, self.timestamp())
return super().sign(value)
def unsign(self, value, max_age=None):
"""
Retrieve original value and check it wasn't signed more
than max_age seconds ago.
"""
result = super().unsign(value)
value, timestamp = result.rsplit(self.sep, 1)
timestamp = baseconv.base62.decode(timestamp)
if max_age is not None:
if isinstance(max_age, datetime.timedelta):
max_age = max_age.total_seconds()
# Check timestamp is not older than max_age
age = time.time() - timestamp
if age > max_age:
raise SignatureExpired(
'Signature age %s > %s seconds' % (age, max_age))
return value
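# Illustrative use of the API above (a sketch: it requires Django settings with a
# SECRET_KEY configured, and the salt/max_age values here are arbitrary):
#
#   token = dumps({"user": 42}, salt="example.notifications", compress=True)
#   data = loads(token, salt="example.notifications", max_age=3600)
#   # loads() raises SignatureExpired once the token is older than max_age seconds.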
|
py
|
1a55bcf9433cb47ed26373df5bd50d32fae15921
|
import os
import sys
import miind.include as include
import miind.algorithms as algorithms
import miind.nodes as nodes
import miind.connections as connections
import miind.simulation as simulation
import miind.variables as variables
import xml.etree.ElementTree as ET
import argparse
import miind.directories as directories
XML_EXTENSION = '.xml'
# Nothing too fancy for the weight type
WEIGHTTYPES = ['double', 'DelayedConnection']
def generate_preamble(outfile):
outfile.write('//Machine-generated by miind.py. Edit at your own risk.\n\n')
for inc in include.includes:
outfile.write(inc +'\n')
return
def generate_closing(outfile, steps, type, t_step):
outfile.write('\tnetwork.configureSimulation(par_run);\n')
# outfile.write('\tstd::thread t1(TwoDLib::Display::stat_runthreaded);\n')
# outfile.write('\tt1.detach();\n')
# outfile.write('\tnetwork.evolve();\n')
if type == "DelayedConnection":
s = "MPILib::" + type
else:
s = "double"
outfile.write('\tstd::vector<MPILib::NodeId> nodes;\n')
outfile.write('\tnodes.push_back(0);\n')
outfile.write('\tTwoDLib::Display::getInstance()->animate(true, nodes,' + t_step + ');\n')
outfile.write('\tnetwork.startSimulation();\n')
outfile.write('\tMPILib::utilities::ProgressBar *pb = new MPILib::utilities::ProgressBar(' + steps + ');\n')
outfile.write('\tlong count = 0;\n')
outfile.write('\twhile(count < ' + steps + ') {\n')
outfile.write('\t\tnetwork.evolveSingleStep(std::vector<MPILib::ActivityType>());\n')
outfile.write('\t\tnetwork.reportNodeActivities(nodes);\n')
outfile.write('\t\tTwoDLib::Display::getInstance()->updateDisplay(count);\n')
outfile.write('\t\tTwoDLib::GridReport<TwoDLib::GridAlgorithm<DelayedConnection>>::getInstance()->reportDensity();\n')
outfile.write('\t\t(*pb)++;\n')
outfile.write('\t\tcount++;\n')
outfile.write('\t}\n')
outfile.write('\tnetwork.endSimulation();\n')
outfile.write('\t} catch(std::exception& exc){\n')
outfile.write('\t\tstd::cout << exc.what() << std::endl;\n')
outfile.write('#ifdef ENABLE_MPI\n')
outfile.write('\t//Abort the MPI environment in the correct way :\n')
outfile.write('\tenv.abort(1);\n')
outfile.write('#endif\n')
outfile.write('\t}\n\n')
outfile.write('\tMPILib::utilities::MPIProxy().barrier();\n')
outfile.write('\tt.stop();\n')
outfile.write('\tif (MPILib::utilities::MPIProxy().getRank() == 0) {\n')
outfile.write('\n\t\tstd::cout << \"Overall time spend\\n\";\n')
outfile.write('\t\tt.report();\n')
outfile.write('\t}\n')
outfile.write('\treturn 0;\n}\n')
for t in algorithms.RATEFUNCTIONS:
outfile.write(t)
return
def define_network_type(outfile, type):
if type == "DelayedConnection":
s = "MPILib::" + type
else:
s = "double"
return 'typedef MPILib::MPINetwork<' + s + ', MPILib::utilities::CircularDistribution> Network;\n'
def parse_xml(infile, outfile):
tree=ET.fromstring(infile.read())
m=tree.find('WeightType')
s = m.text
return define_network_type(outfile,s), tree
def generate_opening(outfile):
outfile.write('int main(int argc, char *argv[]){\n\tNetwork network;\n')
outfile.write('\tboost::timer::auto_cpu_timer t;\n\n')
outfile.write('#ifdef ENABLE_MPI\n')
outfile.write('\t// initialise the mpi environment this cannot be forwarded to a class\n')
outfile.write('\tboost::mpi::environment env(argc, argv);\n')
outfile.write('#endif\n\n')
outfile.write('\ttry {')
def model_name(fn):
    '''Identifies the model files mentioned in an XML file, used, for example, to place
    the right model file in the same directory as the XML file.'''
infile = open(fn)
tree=ET.fromstring(infile.read())
ma = tree.findall('Algorithms/Algorithm')
modelnames = []
for a in ma:
if a.attrib['type'] == 'MeshAlgorithm' or a.attrib['type'] == 'GridAlgorithm':
modelnames.append(a.attrib['modelfile'])
return modelnames
def matrix_transform_name(fn):
    '''Identifies the matrix transform files mentioned in an XML file, used, for example, to place
    the right transform file in the same directory as the XML file.'''
infile = open(fn)
tree=ET.fromstring(infile.read())
ma = tree.findall('Algorithms/Algorithm')
tmatnames = []
for a in ma:
if a.attrib['type'] == 'GridAlgorithm':
tmatnames.append(a.attrib['transformfile'])
return tmatnames
def matrix_names(fn):
    '''Find the file names of all MatrixFiles mentioned in an XML file.'''
infile = open(fn)
tree=ET.fromstring(infile.read())
ma = tree.findall('Algorithms/Algorithm/MatrixFile')
matrixnames = []
for a in ma:
matrixnames.append(a.text)
return matrixnames
def generate_outputfile(infile, outfile, enable_root):
generate_preamble(outfile)
nettype, tree = parse_xml(infile,outfile)
outfile.write(nettype)
    outfile.write('\t// defining variables\n') # whatever variables are used are global
variable_list = tree.findall('Variable')
variables.parse_variables(variable_list,outfile)
algies = tree.findall('Algorithms')
if len(algies) != 1:
raise ValueError
alg_list = algies[0].findall('Algorithm')
weighttype = tree.find('WeightType')
generate_opening(outfile)
outfile.write('\t// generating algorithms\n')
algorithms.parse_algorithms(alg_list,weighttype,outfile)
node_list = tree.findall('Nodes/Node')
outfile.write('\t// generating nodes\n')
nodes.parse_nodes(node_list,weighttype,outfile)
outfile.write('\t// generating connections\n')
connection_list = tree.findall('Connections/Connection')
connections.parse_connections(connection_list,weighttype,outfile)
    outfile.write('\t// generating simulation parameters\n')
simhand = tree.find('SimulationIO')
simulation.parse_simulation(simhand,outfile,enable_root)
simpar = tree.find('SimulationRunParameter')
simulation.parse_parameter(simpar,outfile)
t_begin = tree.find('SimulationRunParameter/t_begin')
t_end = tree.find('SimulationRunParameter/t_end')
t_step = tree.find('SimulationRunParameter/t_step')
m=tree.find('WeightType')
s = m.text
generate_closing(outfile, '(' + t_end.text + ' - ' + t_begin.text + ') / ' + t_step.text , s , t_step.text)
algorithms.reset_algorithms()
nodes.reset_nodes()
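# Illustrative driver for the generator above (a sketch; the file names are
# placeholders): read a simulation XML description and emit the corresponding C++ source.
#
#   with open('simulation.xml') as inf, open('simulation.cpp', 'w') as outf:
#       generate_outputfile(inf, outf, enable_root=True)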
|
py
|
1a55bd4952e950b60c643c76ef2d7f5672ce160f
|
# -*- coding: utf-8 -*-
"""
Code source: https://github.com/KaiyangZhou/deep-person-reid
"""
from __future__ import division, print_function, absolute_import
import math
import numpy as np
from itertools import repeat
from collections import namedtuple, defaultdict
import torch
__all__ = ['compute_model_complexity']
"""
Utility
"""
def _ntuple(n):
def parse(x):
if isinstance(x, int):
return tuple(repeat(x, n))
return x
return parse
_single = _ntuple(1)
_pair = _ntuple(2)
_triple = _ntuple(3)
"""
Convolution
"""
def hook_convNd(m, x, y):
k = torch.prod(torch.Tensor(m.kernel_size)).item()
cin = m.in_channels
flops_per_ele = k * cin # + (k*cin-1)
if m.bias is not None:
flops_per_ele += 1
flops = flops_per_ele * y.numel() / m.groups
return int(flops)
"""
Pooling
"""
def hook_maxpool1d(m, x, y):
flops_per_ele = m.kernel_size - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_maxpool2d(m, x, y):
k = _pair(m.kernel_size)
k = torch.prod(torch.Tensor(k)).item()
# ops: compare
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_maxpool3d(m, x, y):
k = _triple(m.kernel_size)
k = torch.prod(torch.Tensor(k)).item()
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_avgpool1d(m, x, y):
flops_per_ele = m.kernel_size
flops = flops_per_ele * y.numel()
return int(flops)
def hook_avgpool2d(m, x, y):
k = _pair(m.kernel_size)
k = torch.prod(torch.Tensor(k)).item()
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
def hook_avgpool3d(m, x, y):
k = _triple(m.kernel_size)
k = torch.prod(torch.Tensor(k)).item()
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapmaxpool1d(m, x, y):
x = x[0]
out_size = m.output_size
k = math.ceil(x.size(2) / out_size)
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapmaxpool2d(m, x, y):
x = x[0]
out_size = _pair(m.output_size)
k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size)
k = torch.prod(torch.ceil(k)).item()
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapmaxpool3d(m, x, y):
x = x[0]
out_size = _triple(m.output_size)
k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size)
k = torch.prod(torch.ceil(k)).item()
flops_per_ele = k - 1
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapavgpool1d(m, x, y):
x = x[0]
out_size = m.output_size
k = math.ceil(x.size(2) / out_size)
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapavgpool2d(m, x, y):
x = x[0]
out_size = _pair(m.output_size)
k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size)
k = torch.prod(torch.ceil(k)).item()
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
def hook_adapavgpool3d(m, x, y):
x = x[0]
out_size = _triple(m.output_size)
k = torch.Tensor(list(x.size()[2:])) / torch.Tensor(out_size)
k = torch.prod(torch.ceil(k)).item()
flops_per_ele = k
flops = flops_per_ele * y.numel()
return int(flops)
"""
Non-linear activations
"""
def hook_relu(m, x, y):
# eq: max(0, x)
num_ele = y.numel()
return int(num_ele)
def hook_leakyrelu(m, x, y):
# eq: max(0, x) + negative_slope*min(0, x)
num_ele = y.numel()
flops = 3 * num_ele
return int(flops)
"""
Normalization
"""
def hook_batchnormNd(m, x, y):
num_ele = y.numel()
flops = 2 * num_ele # mean and std
if m.affine:
flops += 2 * num_ele # gamma and beta
return int(flops)
def hook_instancenormNd(m, x, y):
return hook_batchnormNd(m, x, y)
def hook_groupnorm(m, x, y):
return hook_batchnormNd(m, x, y)
def hook_layernorm(m, x, y):
num_ele = y.numel()
flops = 2 * num_ele # mean and std
if m.elementwise_affine:
flops += 2 * num_ele # gamma and beta
return int(flops)
"""
Linear
"""
def hook_linear(m, x, y):
flops_per_ele = m.in_features # + (m.in_features-1)
if m.bias is not None:
flops_per_ele += 1
flops = flops_per_ele * y.numel()
return int(flops)
__generic_flops_counter = {
# Convolution
'Conv1d': hook_convNd,
'Conv2d': hook_convNd,
'Conv3d': hook_convNd,
# Pooling
'MaxPool1d': hook_maxpool1d,
'MaxPool2d': hook_maxpool2d,
'MaxPool3d': hook_maxpool3d,
'AvgPool1d': hook_avgpool1d,
'AvgPool2d': hook_avgpool2d,
'AvgPool3d': hook_avgpool3d,
'AdaptiveMaxPool1d': hook_adapmaxpool1d,
'AdaptiveMaxPool2d': hook_adapmaxpool2d,
'AdaptiveMaxPool3d': hook_adapmaxpool3d,
'AdaptiveAvgPool1d': hook_adapavgpool1d,
'AdaptiveAvgPool2d': hook_adapavgpool2d,
'AdaptiveAvgPool3d': hook_adapavgpool3d,
# Non-linear activations
'ReLU': hook_relu,
'ReLU6': hook_relu,
'LeakyReLU': hook_leakyrelu,
# Normalization
'BatchNorm1d': hook_batchnormNd,
'BatchNorm2d': hook_batchnormNd,
'BatchNorm3d': hook_batchnormNd,
'InstanceNorm1d': hook_instancenormNd,
'InstanceNorm2d': hook_instancenormNd,
'InstanceNorm3d': hook_instancenormNd,
'GroupNorm': hook_groupnorm,
'LayerNorm': hook_layernorm,
# Linear
'Linear': hook_linear,
}
__conv_linear_flops_counter = {
# Convolution
'Conv1d': hook_convNd,
'Conv2d': hook_convNd,
'Conv3d': hook_convNd,
# Linear
'Linear': hook_linear,
}
def _get_flops_counter(only_conv_linear):
if only_conv_linear:
return __conv_linear_flops_counter
return __generic_flops_counter
def compute_model_complexity(model, input_size, verbose=False, only_conv_linear=True):
"""Returns number of parameters and FLOPs.
.. note::
        (1) this function only provides an estimate of the theoretical time complexity
        rather than the actual running time, which depends on implementations and hardware;
        and (2) FLOPs are only counted for layers that are used at test time. This means
        that redundant layers such as the person ID classification layer will be ignored, as they
        are discarded when doing feature extraction. Note that the inference graph depends on
how you construct the computations in ``forward()``.
Args:
model (nn.Module): network model.
input_size (tuple): input size, e.g. (1, 3, 256, 128).
verbose (bool, optional): shows detailed complexity of
each module. Default is False.
only_conv_linear (bool, optional): only considers convolution
and linear layers when counting flops. Default is True.
If set to False, flops of all layers will be counted.
"""
registered_handles = []
layer_list = []
layer = namedtuple('layer', ['class_name', 'params', 'flops'])
def _add_hooks(m):
def _has_submodule(m):
return len(list(m.children())) > 0
def _hook(m, x, y):
params = sum(p.numel() for p in m.parameters())
class_name = str(m.__class__.__name__)
flops_counter = _get_flops_counter(only_conv_linear)
if class_name in flops_counter:
flops = flops_counter[class_name](m, x, y)
else:
flops = 0
layer_list.append(layer(class_name=class_name, params=params, flops=flops))
# only consider the very basic nn layer
if _has_submodule(m):
return
handle = m.register_forward_hook(_hook)
registered_handles.append(handle)
default_train_mode = model.training
model.eval().apply(_add_hooks)
input = torch.rand(input_size)
if next(model.parameters()).is_cuda:
input = input.cuda()
model(input) # forward
for handle in registered_handles:
handle.remove()
model.train(default_train_mode)
if verbose:
per_module_params = defaultdict(list)
per_module_flops = defaultdict(list)
total_params, total_flops = 0, 0
for layer in layer_list:
total_params += layer.params
total_flops += layer.flops
if verbose:
per_module_params[layer.class_name].append(layer.params)
per_module_flops[layer.class_name].append(layer.flops)
if verbose:
num_udscore = 55
print(' {}'.format('-' * num_udscore))
print(' Model complexity with input size {}'.format(input_size))
print(' {}'.format('-' * num_udscore))
for class_name in per_module_params:
params = int(np.sum(per_module_params[class_name]))
flops = int(np.sum(per_module_flops[class_name]))
print(' {} (params={:,}, flops={:,})'.format(class_name, params, flops))
print(' {}'.format('-' * num_udscore))
print(' Total (params={:,}, flops={:,})'.format(total_params, total_flops))
print(' {}'.format('-' * num_udscore))
return total_params, total_flops
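# Illustrative usage (a sketch; the torchvision model below is an assumption and
# torchvision is not imported by this module):
#
#   import torchvision
#   model = torchvision.models.resnet18()
#   num_params, flops = compute_model_complexity(
#       model, (1, 3, 224, 224), verbose=True, only_conv_linear=False)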
|
py
|
1a55bd5eccd4d3ad946ff3c51b50c5b1ceb236b4
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle.distributed.passes import new_pass, PassManager
import paddle.distributed.fleet as fleet
from paddle.vision.models import resnet50 as resnet
import unittest
from dist_pass_test_base import DistPassTestBase
import paddle.nn as nn
import numpy as np
class TestFuseAllReducePass(DistPassTestBase):
def init(self):
if paddle.is_compiled_with_cuda():
paddle.set_flags({'FLAGS_cudnn_deterministic': 1})
self.atol = 0.0
self.rtol = 0.0
def apply_passes(self, main_prog, startup_prog):
pass_manager = PassManager([
new_pass("fuse_elewise_add_act"),
new_pass("fuse_all_reduce", {"max_memory_size": 1024 * 1024})
])
pass_manager.apply([main_prog], [startup_prog])
def test_bs_32(self):
self.check_main(batch_size=32)
def get_model(self, place, batch_size):
image = paddle.static.data(
shape=[batch_size, 3, 224, 224], dtype='float32', name='image')
label = paddle.static.data(
shape=[batch_size, 1], dtype='int64', name='label')
model = resnet(pretrained=False)
loss_fn = nn.loss.CrossEntropyLoss()
pred_out = model(image)
loss = loss_fn(pred_out, label)
optimizer = paddle.optimizer.Adam(learning_rate=1e-3)
dist_strategy = fleet.DistributedStrategy()
dist_strategy.fuse_all_reduce_ops = False
dist_strategy.without_graph_optimization = True
fleet.init(is_collective=True, strategy=dist_strategy)
optimizer = fleet.distributed_optimizer(optimizer)
optimizer.minimize(loss)
rank = paddle.distributed.get_rank()
def reader():
np.random.seed(self.seed + rank)
for _ in range(10):
image_np = np.random.random(size=image.shape).astype('float32')
label_np = np.random.randint(
low=0, high=1000, size=label.shape).astype('int64')
yield image_np, label_np
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
return main_program, startup_program, [image, label], [loss], reader
if __name__ == "__main__":
unittest.main()
|
py
|
1a55be6c6ad819c39b8ff0497e958d9a29e89bfd
|
import pytest
from django.conf import settings
from django.test import RequestFactory
from seatrekking.users.views import UserRedirectView, UserUpdateView
pytestmark = pytest.mark.django_db
class TestUserUpdateView:
"""
TODO:
extracting view initialization code as class-scoped fixture
would be great if only pytest-django supported non-function-scoped
fixture db access -- this is a work-in-progress for now:
https://github.com/pytest-dev/pytest-django/pull/258
"""
def test_get_success_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_success_url() == f"/users/{user.username}/"
def test_get_object(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserUpdateView()
request = request_factory.get("/fake-url/")
request.user = user
view.request = request
assert view.get_object() == user
class TestUserRedirectView:
def test_get_redirect_url(
self, user: settings.AUTH_USER_MODEL, request_factory: RequestFactory
):
view = UserRedirectView()
request = request_factory.get("/fake-url")
request.user = user
view.request = request
assert view.get_redirect_url() == f"/users/{user.username}/"
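# The `user` and `request_factory` arguments above are assumed to come from
# project-level pytest fixtures (e.g. in conftest.py); a minimal sketch:
#
#   import pytest
#   from django.test import RequestFactory
#
#   @pytest.fixture
#   def request_factory() -> RequestFactory:
#       return RequestFactory()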
|
py
|
1a55c007dca16bd6a675c2c8a51cf42a2b631ea4
|
import os
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
BASE_DIR = PACKAGE_ROOT
DEBUG = True
TEMPLATE_DEBUG = DEBUG
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "dev.db",
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = int(os.environ.get("SITE_ID", 1))
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/site_media/static/"
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(PACKAGE_ROOT, "static"),
]
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = "yc)7(o27pavgtr&$3i3si@d8zulc&0b#1kpof7f%jo@&+vlc$g"
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
TEMPLATE_CONTEXT_PROCESSORS = [
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.core.context_processors.request",
"django.contrib.messages.context_processors.messages",
"account.context_processors.account",
"pinax_theme_bootstrap.context_processors.theme",
]
MIDDLEWARE_CLASSES = [
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
]
ROOT_URLCONF = "rakesh.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "rakesh.wsgi.application"
TEMPLATE_DIRS = [
os.path.join(PACKAGE_ROOT, "templates"),
]
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
# theme
"bootstrapform",
"pinax_theme_bootstrap",
# external
"account",
"eventlog",
"metron",
"easy_thumbnails",
"kaleo",
"teams",
"wiki",
# project
"rakesh",
"rakesh.profiles",
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler"
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
}
}
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_USE_OPENID = False
ACCOUNT_REQUIRED_EMAIL = False
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_CONFIRMATION_REQUIRED = False
ACCOUNT_LOGIN_REDIRECT_URL = "home"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
WIKI_HOOKSET = "rakesh.hooks.ProjectWikiHookset"
WIKI_BINDERS = [
"rakesh.binders.UserBinder",
"rakesh.binders.TeamBinder"
]
AUTHENTICATION_BACKENDS = [
"account.auth_backends.UsernameAuthenticationBackend",
"django.contrib.auth.backends.ModelBackend"
]
AUTH_PROFILE_MODULE = "profiles.Profile"
|
py
|
1a55c059946214e3a29ef09d156abd0ac016dc15
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
try:
import frida
except ImportError:
sys.exit('install frida\nsudo python3 -m pip install frida')
def err(msg):
sys.stderr.write(msg + '\n')
def on_message(message, data):
if message['type'] == 'error':
err('[!] ' + message['stack'])
elif message['type'] == 'send':
print('[+] ' + message['payload'])
else:
print(message)
def kill_process(target_process):
cmd = 'adb shell pm clear {} 1> /dev/null'.format(target_process)
os.system(cmd)
def main():
target_process = sys.argv[1]
#kill_process(target_process)
device = frida.get_usb_device()
try:
started = False
session = device.attach(target_process)
except frida.ProcessNotFoundError:
print('Starting process {}...\n'.format(target_process))
started = True
try:
pid = device.spawn([target_process])
except frida.NotSupportedError:
            sys.exit('An error occurred while attaching to the process\n')
session = device.attach(pid)
script = session.create_script("""
Java.perform(function () {
var Log = Java.use("android.util.Log");
Log.e.overload('java.lang.String', 'java.lang.String').implementation = function (tag, entry) {
console.log('Log.e( ' + tag + ', ' + entry + ' )');
console.log('');
return this.e.apply(this, arguments);
}
Log.w.overload('java.lang.String', 'java.lang.String').implementation = function (tag, entry) {
console.log('Log.w( ' + tag + ', ' + entry + ' )');
console.log('');
return this.w.apply(this, arguments);
}
Log.i.overload('java.lang.String', 'java.lang.String').implementation = function (tag, entry) {
console.log('Log.i( ' + tag + ', ' + entry + ' )');
console.log('');
return this.i.apply(this, arguments);
}
Log.d.overload('java.lang.String', 'java.lang.String').implementation = function (tag, entry) {
console.log('Log.d( ' + tag + ', ' + entry + ' )');
console.log('');
return this.d.apply(this, arguments);
}
});
""")
script.on('message', on_message)
print('[!] Press <Enter> at any time to detach from instrumented program.\n\n')
script.load()
if started:
device.resume(pid)
input()
session.detach()
if __name__ == '__main__':
if len(sys.argv) != 2:
        usage = 'usage: {} <process name or PID>\n\n'.format(__file__)
sys.exit(usage)
main()
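# Example invocation (a sketch; the script and package names are placeholders):
#   python3 hook_android_log.py com.example.app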
|
py
|
1a55c069193c48b5e3225db345b83a024f818e25
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encryption and digest functionality."""
__author__ = 'Mike Gainer ([email protected])'
import base64
import hashlib
import hmac
import os
import random
import time
from Crypto.Cipher import AES
import appengine_config
from common import utils
from models import config
from google.appengine.api import users
XSRF_SECRET_LENGTH = 20
XSRF_SECRET = config.ConfigProperty(
'gcb_xsrf_secret', str, (
'Text used to encrypt tokens, which help prevent Cross-site request '
'forgery (CSRF, XSRF). You can set the value to any alphanumeric text, '
'preferably using 16-64 characters. Once you change this value, the '
'server rejects all subsequent requests issued using an old value for '
'this variable.'),
'course builder XSRF secret')
ENCRYPTION_SECRET_LENGTH = 48
ENCRYPTION_SECRET = config.ConfigProperty(
'gcb_encryption_secret', str, (
'Text used to encrypt messages. You can set this to any text at all, '
'but the value must be exactly ' + str(ENCRYPTION_SECRET_LENGTH) +
' characters long. If you change this value, the server will be '
'unable to understand items encrypted under the old key.'),
'default value of CourseBuilder encryption secret',
validator=config.ValidateLength(ENCRYPTION_SECRET_LENGTH).validator)
class EncryptionManager(object):
@classmethod
def _init_secret_if_none(cls, cfg, length):
# Any non-default value is fine.
if cfg.value and cfg.value != cfg.default_value:
return
# All property manipulations must run in the default namespace.
with utils.Namespace(appengine_config.DEFAULT_NAMESPACE_NAME):
# Look in the datastore directly.
entity = config.ConfigPropertyEntity.get_by_key_name(cfg.name)
if not entity:
entity = config.ConfigPropertyEntity(key_name=cfg.name)
# Any non-default non-None value is fine.
if (entity.value and not entity.is_draft and
(str(entity.value) != str(cfg.default_value))):
return
# Initialize to random value.
entity.value = base64.urlsafe_b64encode(
os.urandom(int(length * 0.75)))
entity.is_draft = False
entity.put()
@classmethod
def _get_hmac_secret(cls):
"""Verifies that non-default XSRF secret exists; creates one if not."""
cls._init_secret_if_none(XSRF_SECRET, XSRF_SECRET_LENGTH)
return XSRF_SECRET.value
@classmethod
def _get_encryption_secret(cls):
"""Verifies non-default encryption secret exists; creates one if not."""
cls._init_secret_if_none(ENCRYPTION_SECRET, ENCRYPTION_SECRET_LENGTH)
return ENCRYPTION_SECRET.value
@classmethod
    def hmac(cls, components):
        """Generate an XSRF digest over the array of component strings."""
secret = cls._get_hmac_secret()
digester = hmac.new(str(secret))
for component in components:
digester.update(component)
return digester.digest()
@classmethod
def _build_crypto(cls, secret):
if len(secret) != 48:
raise ValueError('Encryption secret must be exactly 48 characters')
return AES.new(secret[:32], AES.MODE_CBC, secret[32:])
@classmethod
def encrypt(cls, message, secret=None):
"""Encrypt a message. Message value returned is not URL-safe."""
message = message or ''
message = '%d.%s' % (len(message), message)
message += '^' * (16 - len(message) % 16)
secret = secret or cls._get_encryption_secret()
return cls._build_crypto(secret).encrypt(message)
@classmethod
    def encrypt_to_urlsafe_ciphertext(cls, message, secret=None):
        """Convenience wrapper to get a URL-safe version of encrypted data."""
return base64.urlsafe_b64encode(cls.encrypt(message, secret))
@classmethod
def decrypt(cls, message, secret=None):
"""Decrypt a message, returning the original plaintext."""
secret = secret or cls._get_encryption_secret()
crypto = cls._build_crypto(secret)
message = crypto.decrypt(message)
delim_index = message.find('.')
original_length = int(message[:delim_index])
return message[delim_index + 1:delim_index + 1 + original_length]
@classmethod
def decrypt_from_urlsafe_ciphertext(cls, message, secret=None):
return cls.decrypt(base64.urlsafe_b64decode(message), secret)
class XsrfTokenManager(object):
    """Provides XSRF protection via transient, HMAC-signed action/user tokens."""
# Max age of the token (4 hours).
XSRF_TOKEN_AGE_SECS = 60 * 60 * 4
# Token delimiters.
DELIMITER_PRIVATE = ':'
DELIMITER_PUBLIC = '/'
    # Default user id to use if there is no logged-in user.
USER_ID_DEFAULT = 'default'
@classmethod
def _create_token(cls, action_id, issued_on):
"""Creates a string representation (digest) of a token."""
        # We have decided to use transient tokens (not stored server-side) to
        # reduce datastore costs. The token has two parts: the plain text of the
        # time issued and an HMAC digest computed over the actor user id, the
        # action id and the time issued.
# Lookup user id.
user = users.get_current_user()
if user:
user_id = user.user_id()
else:
user_id = cls.USER_ID_DEFAULT
# Round time to seconds.
issued_on = long(issued_on)
digest = EncryptionManager.hmac(
cls.DELIMITER_PRIVATE.join([
str(user_id), str(action_id), str(issued_on)]))
token = '%s%s%s' % (
issued_on, cls.DELIMITER_PUBLIC, base64.urlsafe_b64encode(digest))
return token
@classmethod
def create_xsrf_token(cls, action):
return cls._create_token(action, time.time())
@classmethod
def is_xsrf_token_valid(cls, token, action):
"""Validate a given XSRF token by retrieving it from memcache."""
try:
parts = token.split(cls.DELIMITER_PUBLIC)
if len(parts) != 2:
return False
issued_on = long(parts[0])
age = time.time() - issued_on
if age > cls.XSRF_TOKEN_AGE_SECS:
return False
authentic_token = cls._create_token(action, issued_on)
if authentic_token == token:
return True
return False
except Exception: # pylint: disable=broad-except
return False
def get_external_user_id(app_id, namespace, email):
"""Gets an id for a user that can be transmitted to external systems.
The returned key is scoped to a particular user within a particular course
on a particular Course Builder deployment, and is guaranteed to be
statistically unique within that scope.
Args:
app_id: string. Application ID of the CB App Engine deployment.
namespace: string. Namespace of a single course. May be the empty string.
email: string. Unvalidated email address for a user.
Returns:
String.
"""
return hmac.new(
'%s%s%s' % (app_id, namespace, email), digestmod=hashlib.sha256
).hexdigest()
def hmac_sha_2_256_transform(privacy_secret, value):
"""HMAC-SHA-2-256 for use as a privacy transformation function."""
return hmac.new(
str(privacy_secret), msg=str(value), digestmod=hashlib.sha256
).hexdigest()
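# For example (a sketch; the secret and value are placeholders):
#   hmac_sha_2_256_transform('privacy-secret', 'user_id_1234')
#   # -> a 64-character hexadecimal digest, stable for identical inputs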
def generate_transform_secret_from_xsrf_token(xsrf_token, action):
"""Deterministically generate a secret from an XSRF 'nonce'.
    When multiple data sources are being accessed via the REST API, consumers
may need to correlate data across the different sources. To take
a particular example, the analytics page on the dashboard is one
such consumer. This function provides a convenient way to turn an
opaque, non-forgeable XSRF token internally into an HMAC secret.
The main point here is that the secret string used for HMAC'ing
the PII in the data source outputs is
- Not derived from anything the user may generate, so the user
cannot manipulate the seed value to experiment to find weaknesses.
- Not predictable given the information the user has. (The user does
not have the encryption key.) The encryption key is used in preference
to using the HMAC key twice.
Args:
xsrf_token: An XSRF token encoded as usual for use as an
HTML parameter.
action: Action expected to be present in the token.
Returns:
None if the XSRF token is invalid, or an encryption key if it is.
"""
if not XsrfTokenManager.is_xsrf_token_valid(xsrf_token, action):
return None
# Encrypt the publicly-visible xsrf parameter with our private
# encryption secret so that we now have a string which is
# - Entirely deterministic
    # - Not generatable by anyone not in possession of the encryption secret.
seed_string = EncryptionManager.encrypt(xsrf_token)
seed = 0
for c in seed_string:
seed *= 256
seed += ord(c)
r = random.Random(seed)
# Use the random seed to deterministically generate a secret which
# will be consistent for identical values of the HMAC token.
return base64.urlsafe_b64encode(
''.join(chr(r.getrandbits(8)) for unused in range(
int(ENCRYPTION_SECRET_LENGTH * 0.75))))
def _digit32_to_char(digit32):
assert digit32 >= 0 and digit32 < 32
if digit32 < 10:
return chr(ord('0') + digit32)
return chr(ord('A') + digit32-10)
def _char_to_digit_32(char):
if char >= '0' and char <= '9':
return ord(char) - ord('0')
return ord(char.upper()) - ord('A')
HMAC_KEY = 'a;sldkjaweruoalksjdf'
def verify_access_code(access_code):
access_code = access_code.strip()
if len(access_code) != 6:
return False
to_encode = access_code[0:3].upper()
crypt = hmac.new(HMAC_KEY, to_encode)
encoded = base64.b32encode(crypt.digest())[0:3]
return encoded == access_code[3:6].upper()
def generate_access_code():
    """Generates a 6-character access code that is not very secure, but secure enough for 49 CHF.
    The first 3 characters are random base32; the next 3 are the prefix of the base32-encoded MD5 HMAC of the first 3."""
to_encode = _digit32_to_char(random.randrange(32)) + _digit32_to_char(random.randrange(32)) + _digit32_to_char(random.randrange(32))
crypt = hmac.new(HMAC_KEY, to_encode)
encoded = base64.b32encode(crypt.digest())[0:3]
return to_encode+encoded
def main():
print generate_access_code()
print verify_access_code(generate_access_code())
print verify_access_code(generate_access_code().lower())
if __name__ == "__main__":
main()
|
py
|
1a55c10ee323bae9cf7a99ffce6df5f354feb625
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
############
class CloudifyCliError(Exception):
pass
class CloudifyBootstrapError(Exception):
pass
class CloudifyValidationError(Exception):
pass
class SuppressedCloudifyCliError(Exception):
pass
class ExecutionTimeoutError(RuntimeError):
def __init__(self, execution_id, message):
self.execution_id = execution_id
self.message = message
class EventProcessingTimeoutError(RuntimeError):
def __init__(self, execution_id, message):
self.execution_id = execution_id
self.message = message
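# Illustrative handling of the timeout errors above (the values are placeholders):
#
#   try:
#       raise ExecutionTimeoutError('exec-123', 'execution timed out after 900 seconds')
#   except ExecutionTimeoutError as error:
#       print(error.execution_id, error.message)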
|
py
|
1a55c1452763fe4c9539e1296e2381286cb9922a
|
import asyncio
import base64
import binascii
import hashlib
import json
import logging
import os
import random
import requests
import sys
import time
from urllib.parse import urlparse
from qrcode import QRCode
from aiohttp import ClientError
from uuid import uuid4
from datetime import date
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # noqa
from runners.support.agent import DemoAgent, default_genesis_txns
from runners.support.utils import (
log_json,
log_msg,
log_status,
log_timer,
prompt,
prompt_loop,
require_indy,
)
CRED_PREVIEW_TYPE = (
"did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/issue-credential/1.0/credential-preview"
)
SELF_ATTESTED = os.getenv("SELF_ATTESTED")
LOGGER = logging.getLogger(__name__)
TAILS_FILE_COUNT = int(os.getenv("TAILS_FILE_COUNT", 100))
class VSWAgent(DemoAgent):
def __init__(
self,
http_port: int,
admin_port: int,
tails_server_base_url: str = None,
**kwargs,
):
super().__init__(
"VSW.Agent",
http_port,
admin_port,
prefix="VSW",
tails_server_base_url=tails_server_base_url,
extra_args=["--auto-accept-invites", "--auto-accept-requests"],
**kwargs,
)
self.connection_id = None
self._connection_ready = asyncio.Future()
self.cred_state = {}
self.cred_done = asyncio.Future()
# TODO define a dict to hold credential attributes
# based on credential_definition_id
self.cred_attrs = {}
self.proof_done = asyncio.Future()
async def detect_connection(self):
await self._connection_ready
async def credential_complete(self):
await self.cred_done
async def proof_complete(self):
await self.proof_done
@property
def connection_ready(self):
return self._connection_ready.done() and self._connection_ready.result()
async def handle_connections(self, message):
if message["connection_id"] == self.connection_id:
if message["state"] == "active" and not self._connection_ready.done():
self.log("Connected")
self._connection_ready.set_result(True)
async def handle_present_proof(self, message):
state = message["state"]
presentation_exchange_id = message["presentation_exchange_id"]
self.log(
"Presentation: state =",
state,
", presentation_exchange_id =",
presentation_exchange_id,
)
if state == "presentation_received":
log_status("#27 Process the proof provided by X")
log_status("#28 Check if proof is valid")
proof = await self.admin_POST(
f"/present-proof/records/{presentation_exchange_id}/verify-presentation"
)
self.log("Proof =", proof["verified"])
# if presentation is a vsw schema (app publication),
# check the values received
pres = message["presentation"]
self.log("pres:", pres)
name = pres['requested_proof']['revealed_attrs']['0_name_uuid']['raw']
url = pres['requested_proof']['revealed_attrs']['0_url_uuid']['raw']
digest = pres['requested_proof']['revealed_attrs']['0_digest_uuid']['raw']
response = requests.get(url, allow_redirects=True)
if response.status_code != 200:
print("Failed to download file from URL")
sys.exit(1)
computed = hashlib.sha256(response.content).hexdigest()
if computed != digest:
print("SHA does not match")
print(computed)
sys.exit(1)
else:
open(f'vsw/{name}.wasm', 'wb').write(response.content)
self.log("SUCCESS")
self.proof_done.set_result(True)
async def handle_basicmessages(self, message):
self.log("Received message:", message["content"])
async def main(
start_port: int,
name: str,
show_timing: bool = False,
):
with open('/home/indy/vsw/.config.json') as f:
config = json.load(f)
genesis = await default_genesis_txns()
if not genesis:
print("Error retrieving ledger genesis transactions")
sys.exit(1)
agent = None
try:
log_status("#1 Provision an agent and wallet, get back configuration details")
agent = VSWAgent(
start_port,
start_port + 1,
genesis_data=genesis,
timing=show_timing,
)
await agent.listen_webhooks(start_port + 2)
# FIXME: This user should not have to publish their DID, but if I remove the next line it fails
await agent.register_did()
with log_timer("Startup duration:"):
await agent.start_process()
log_msg("Admin URL is at:", agent.admin_url)
log_msg("Endpoint URL is at:", agent.endpoint)
# Connect to repo
log_status("#9 Connect to repo")
connection = await agent.admin_POST("/connections/receive-invitation", config['invitation'])
agent.connection_id = connection["connection_id"]
log_json(connection, label="Invitation response:")
await agent.detect_connection()
log_status("#20 Request app credential from repo")
req_attrs = [
{
"name": "name",
"value": name,
"restrictions": [{"schema_name": "vsw schema"}]
},
{
"name": "url",
"restrictions": [{"schema_name": "vsw schema"}]
},
{
"name": "digest",
"restrictions": [{"schema_name": "vsw schema"}]
}
]
req_preds = []
indy_proof_request = {
"name": "Retrieve by Name",
"version": "1.0",
"nonce": str(uuid4().int),
"requested_attributes": {
f"0_{req_attr['name']}_uuid": req_attr
for req_attr in req_attrs
},
"requested_predicates": {}
}
proof_request_web_request = {
"connection_id": agent.connection_id,
"proof_request": indy_proof_request
}
# this sends the request to our agent, which forwards it to the repo
# (based on the connection_id)
await agent.admin_POST(
"/present-proof/send-request",
proof_request_web_request
)
await agent.proof_complete()
finally:
terminated = True
try:
if agent:
await agent.terminate()
except Exception:
LOGGER.exception("Error terminating agent:")
terminated = False
await asyncio.sleep(0.1)
if not terminated:
os._exit(1)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs a VSW agent.")
parser.add_argument(
"-p",
"--port",
type=int,
default=8050,
metavar=("<port>"),
help="Choose the starting port number to listen on",
)
parser.add_argument(
"--timing", action="store_true", help="Enable timing information"
)
parser.add_argument("name", type=str, help="name of app to install")
args = parser.parse_args()
ENABLE_PYDEVD_PYCHARM = os.getenv("ENABLE_PYDEVD_PYCHARM", "").lower()
ENABLE_PYDEVD_PYCHARM = ENABLE_PYDEVD_PYCHARM and ENABLE_PYDEVD_PYCHARM not in (
"false",
"0",
)
PYDEVD_PYCHARM_HOST = os.getenv("PYDEVD_PYCHARM_HOST", "localhost")
PYDEVD_PYCHARM_CONTROLLER_PORT = int(
os.getenv("PYDEVD_PYCHARM_CONTROLLER_PORT", 5001)
)
if ENABLE_PYDEVD_PYCHARM:
try:
import pydevd_pycharm
print(
"VSW remote debugging to "
f"{PYDEVD_PYCHARM_HOST}:{PYDEVD_PYCHARM_CONTROLLER_PORT}"
)
pydevd_pycharm.settrace(
host=PYDEVD_PYCHARM_HOST,
port=PYDEVD_PYCHARM_CONTROLLER_PORT,
stdoutToServer=True,
stderrToServer=True,
suspend=False,
)
except ImportError:
print("pydevd_pycharm library was not found")
require_indy()
try:
asyncio.get_event_loop().run_until_complete(
main(
args.port,
args.name,
args.timing,
)
)
except KeyboardInterrupt:
os._exit(1)
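# Example invocation (a sketch; the script name and app name are placeholders,
# and a reachable ledger plus /home/indy/vsw/.config.json are assumed):
#   python3 vsw_install.py --port 8050 myapp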
|
py
|
1a55c161fa8a136f0ee9db41182888dbff9540c6
|
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# practical1.py
# Created on: 2018-01-23 16:04:05.00000
# (generated by ArcGIS/ModelBuilder)
# Usage: practical1 <Input_Features> <Distance__value_or_field_> <Layer> <Intersect__3_> <Output_Feature_Class>
# Description:
# ---------------------------------------------------------------------------
# Import arcpy module
import arcpy
# Script arguments
Input_Features = arcpy.GetParameterAsText(0)
Distance__value_or_field_ = arcpy.GetParameterAsText(1)
Layer = arcpy.GetParameterAsText(2)
Intersect__3_ = arcpy.GetParameterAsText(3)
if Intersect__3_ == '#' or not Intersect__3_:
Intersect__3_ = "\\\\ds.leeds.ac.uk\\student\\student13\\gy17cev\\ArcGIS\\Default.gdb\\Intersect" # provide a default value if unspecified
Output_Feature_Class = arcpy.GetParameterAsText(4)
# Local variables:
# Process: Buffer
arcpy.Buffer_analysis(Input_Features, Output_Feature_Class, Distance__value_or_field_, "FULL", "ROUND", "NONE", "", "PLANAR")
# Process: Intersect
arcpy.Intersect_analysis([Layer,Output_Feature_Class], Intersect__3_, "ALL", "", "INPUT")
|
py
|
1a55c2a937833856cf82918909a69317a5834693
|
a = input("lanjut(y/t)?")  # "continue (y/n)?"
if a in ["y", "Y"]:
    print("baiklah")  # "alright"
else:
    print("okelah...")  # "okay then..."
|
py
|
1a55c37c3d332fcd4137cd1cc048cc04d2443452
|
# -*- coding: utf-8 -*-
"""
module to drive UI automation (uiauto)
"""
import json
import re
import codecs
import time
from urllib2 import URLError
from appium import webdriver
from selenium.common.exceptions import WebDriverException
from logger import logger
from emulator import ADB
from db import DB
from smartmonkey import Navigator, Stabilizer
from myexceptions import PathNotDefinedInConfig, IdPHandlingException, EmulatorActionException,\
TestInitException
RP_REQUEST_TIMEOUT = 20
IDP_STATUS_TIMEOUT = 20
class UIAction(object):
"""class to simplify ui action process"""
def __init__(self, driver, emulator=None, idp=None, config_file=None, package=None, version=None):
if 'udid' in driver.desired_capabilities and driver.desired_capabilities['udid']:
self.adb = ADB(serial=driver.desired_capabilities['udid'])
elif emulator and emulator.serial:
self.adb = ADB(serial=emulator.serial)
else:
self.adb = ADB()
if not package:
package = self.adb.current_package()
version = self.adb.current_version(package)
self.emulator = emulator
self.package = package
self.driver = driver
self.idp = idp
self.app_style = 'international' if self.idp == 'fb' else 'chinese'
self.config_file = config_file
self.config = {}
if config_file:
self.loaded = self.load_config_from_file(config_file)
else:
if not idp:
raise Exception("IdP not specified")
self.loaded = self.load_config_from_db(package, idp, version=version)
if self.loaded:
if 'home_activity' in self.config and self.config['home_activity']:
self.has_home_activity = True
else:
self.has_home_activity = False
self.stabilizer = Stabilizer(self.driver, package=self.package, app_style=self.app_style)
def load_config_from_db(self, package, idp, version=None):
"""load configuration from database"""
config = DB().fetch_config(package, idp, version=version)
if not config:
return False
else:
logger.debug(u'Config for %s loaded from DB', package)
return self.set_config(config)
def load_config_from_file(self, filename):
"""load configuration from config file"""
try:
with open(filename, 'r') as config_f:
result = self.set_config(config_f.read())
logger.debug(u'Config for %s loaded from %s', self.config['package'], filename)
return result
except EnvironmentError:
logger.error(u'Read file error: %s', filename)
return False
def set_config(self, config):
"""initialize configuration from json"""
try:
if isinstance(config, str):
config = json.loads(config)
# check required objects
package_name = config['package']
package_version = config['version']
installed_version = self.adb.current_version(package_name)
config_version = package_version
if installed_version != config_version:
logger.warning(u'Version inconsistent - Installed: %s, Config: %s',\
installed_version, config_version)
self.config = config
return True
except ValueError:
logger.error(u'Invalid path format')
raise
def login(self):
"""perform navigation to get to login page"""
assert self.loaded
login_path = self.path_for('login')
origin = self.origin_for('login')
if origin:
self.stabilizer.better_start_activity(origin)
else:
self.start_home_activity()
logger.info(u"[+] Navigate for login")
loginer = Navigator(self.driver, path=login_path, package=self.config['package'],\
app_style=self.app_style)
return loginer.navigate()
def login_from_snapshot(self, tag):
"""restore session from snapshot"""
# check emulator
if not self.emulator:
raise EmulatorActionException('No emulator instance is specified')
# check snapshot
tags_in_use = [ x['tag'] for x in self.emulator.list_snapshot()]
if tag not in tags_in_use:
raise EmulatorActionException('No snapshot with tag {}'.format(tag))
# try to load snapshot
if not self.emulator.load_snapshot(tag):
raise EmulatorActionException('Fail to load snapshot {}'.format(tag))
# try to restore appium session
desired_caps = self.driver.capabilities
desired_caps['autoLaunch'] = False
try:
self.driver = webdriver.Remote(self.driver.command_executor._url, desired_caps)
except URLError:
raise TestInitException('appium is not running')
        # try to login; default to None when no IdP-specific handling applies here
        status = None
        if self.idp == 'fb':
            status = 'IdpNeedLogin'
        return status
def logout(self, reset=False):
"""perform logout action"""
assert self.loaded
self.clear_sdcard()
if reset:
logger.info(u"[+] App reset")
return self.driver.reset()
else:
logout_path = self.path_for('logout')
origin = self.origin_for('logout')
if origin:
self.stabilizer.better_start_activity(origin)
else:
self.start_home_activity()
logger.info(u"[+] Navigate for logout")
logoutter = Navigator(self.driver, path=logout_path, package=self.config['package'],\
app_style=self.app_style)
return logoutter.navigate()
def clear_sdcard(self):
"""clear sdcard and keep only essential files"""
files = self.adb.ls('/sdcard/')
files_reserved = ['Android', 'DCIM', 'Download', 'Movies', 'Music', 'Notifications', 'Pictures',
'Podcasts', 'Ringtones', 'UxinSDK', 'backups', 'sina', 'tencent']
for fname in files:
if fname in files_reserved:
continue
self.adb.rm('/sdcard/{}'.format(fname))
def user_info(self):
"""retrieve user info"""
assert self.loaded
info_path = self.path_for('user_info')
origin = self.origin_for('user_info')
if origin:
self.stabilizer.better_start_activity(origin)
else:
self.start_home_activity()
logger.info(u"[+] Navigate for user info")
user_getter = Navigator(self.driver, path=info_path, package=self.config['package'],\
app_style=self.app_style)
status = user_getter.navigate()
if status == 'LoggedIn':
identities = self.config['paths']['user_info']['identities']
for (k, val) in identities.items():
if re.search(val, self.page_source, re.I):
return k
return 'Others'
else:
return status
# ----------------- Single Destination -------------------
# match = re.search(self.config['paths']['user_info']['identity_regex'],
# self.page_source)
# if len(match.groups()) > 0:
# return match.group(1)
# else:
# return match.group(0)
# [ example_regex: "(?=<[^<]*user_name[^<]*>)<.*?text=\"(.*?)\".*?>" ]
def landing(self):
"""land on home activity"""
home_activity = self.stabilizer.get_home_activity()
if self.loaded:
if self.has_home_activity and self.config['home_activity'] != home_activity:
logger.warning(u'home_activity already exists in config, skip record update\n'
u'\tstored: %s, new: %s', self.config['home_activity'],\
home_activity)
else:
self.has_home_activity = True
self.config['home_activity'] = home_activity
if self.config_file:
self.config['home_activity'] = home_activity
with open(self.config_file, 'wb') as config_f:
config_f = codecs.getwriter('utf-8')(config_f)
json.dump(self.config, config_f, indent=4, sort_keys=True,\
ensure_ascii=False)
else:
result = DB().update_config(self.config['package'], self.config['idp'],\
{'home_activity': home_activity}, version=self.config['version'])
if result:
logger.info(u'home_activity:%s stored into config', home_activity)
else:
logger.info(u'Landed on %s', home_activity)
return home_activity
def start_home_activity(self, is_retry=False):
"""better start home activity"""
if self.loaded and self.has_home_activity:
home_activity = self.config['home_activity']
else:
logger.debug(u'home_activity not defined in DB')
home_activity = self.landing()
if self.stabilizer.better_start_activity(home_activity):
return True
else:
if is_retry:
logger.warning('uiaction: start_home_activity mismatch')
return False
else:
self.stabilizer.skip_irrelevant()
if self.driver.current_activity == home_activity:
return True
else:
return self.start_home_activity(is_retry=True)
def origin_for(self, action):
"""find origin of the action"""
if action not in self.config['paths']:
return False
if 'origin' in self.config['paths'][action]:
return self.config['paths'][action]['origin']
else:
return False
def path_for(self, action):
"""find path to the action"""
if action in self.config['paths'] and self.config['paths'][action]:
return json.dumps(self.config['paths'][action])
else:
raise PathNotDefinedInConfig(u"%s not configured for %s - %s"
% (action, self.config['package'], self.config['version']))
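    # Illustrative shape of a loaded config, inferred from the accessors above
    # (set_config, origin_for, path_for, user_info); all concrete values are
    # hypothetical and only meant to show which keys this class expects:
    #
    #   {
    #       "package": "com.example.app",
    #       "version": "1.2.3",
    #       "idp": "fb",
    #       "home_activity": ".MainActivity",
    #       "paths": {
    #           "login": {"origin": ".LoginActivity", ...},
    #           "logout": {...},
    #           "user_info": {
    #               "origin": ".ProfileActivity",
    #               "identities": {"alice": "user_name.*alice"},
    #               ...
    #           }
    #       }
    #   }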
def fblite_login_handler(self, stab, account=None, password=None):
"""handler for fblite login"""
if not account or not password:
account = "[email protected]"
password = "evessotest"
# very ugly wait for status change
logger.debug(u'Wait for status change')
time.sleep(IDP_STATUS_TIMEOUT)
# if session is stored
if self.driver.current_activity != 'com.facebook.browser.lite.BrowserLiteActivity':
logger.debug(u'Session is stored')
return True
# click continue
logger.debug(u'Try to click continue')
stab.find_elements_by_keyword(u'Continue', clickable_only=True,\
exact=False)[-1].click()
# wait for getting out of fblite
count = 0
while self.driver.current_activity == 'com.facebook.browser.lite.BrowserLiteActivity':
time.sleep(1)
count += 1
assert count <= IDP_STATUS_TIMEOUT
logger.debug(u'Get out of fblite')
return True
def fb_login_handler(self, stab, account=None, password=None):
"""handler for facebook webview login"""
if not account or not password:
account = "[email protected]"
password = "evessotest"
keywords = [u"Enter email", u"请输入邮箱", u"輸入電郵"]
err_keywords = [u'Error', u'Invalid', u'应用编号无效']
block_keywords = [u'Secure Account']
# wait for correct activity
logger.debug(u'Wait for facebook login activity')
count = 0
activity = self.driver.current_activity
while activity != 'com.facebook.FacebookActivity' and activity != 'com.facebook.LoginActivity':
count += 1
activity = self.driver.current_activity
assert count <= IDP_STATUS_TIMEOUT
# wait for input boxes appear
logger.debug(u'Wait for input boxes appear')
count = 0
while not self.driver.find_elements_by_class_name('android.widget.EditText'):
# in case app does not support fb login at all
if any(kw in self.page_source for kw in err_keywords):
raise IdPHandlingException('This app does not support facebook login')
time.sleep(1)
count += 1
assert count <= IDP_STATUS_TIMEOUT
# input email and password
source = self.page_source
logger.debug(u'Try to input email and password')
if any(kw in source for kw in keywords):
self.driver.find_elements_by_class_name('android.widget.EditText')[0]\
.set_text(account)
self.driver.find_elements_by_class_name('android.widget.Button')[-1].click()
self.driver.find_elements_by_class_name('android.widget.EditText')[-1]\
.set_text(password)
self.driver.find_elements_by_class_name('android.widget.Button')[-1].click()
elif any(kw in source for kw in err_keywords):
raise IdPHandlingException('This app does not support facebook login')
else:
inputs = self.driver.find_elements_by_class_name('android.widget.EditText')
inputs[0].set_text(account)
inputs[-1].set_text(password)
self.driver.find_elements_by_class_name('android.widget.Button')[-1].click()
# wait for status change
logger.debug(u'Wait for status change')
status_keywords = [u'身分繼續', u'Continue', u'would like to'] + err_keywords + block_keywords
count = 0
while not any(kw in self.page_source for kw in status_keywords):
time.sleep(1)
count += 1
assert count <= IDP_STATUS_TIMEOUT
# handle pages after status change
count = 0
logger.debug(u'Try to handle pages after status change')
while self.driver.current_activity == 'com.facebook.FacebookActivity'\
or self.driver.current_activity == 'com.facebook.LoginActivity':
count += 1
source = self.page_source
# in case of any error
if any(kw in source for kw in err_keywords):
logger.debug(u'Error keywords received')
raise IdPHandlingException('This app does not support facebook login')
# in case of account has been blocked
elif any(kw in self.page_source for kw in block_keywords):
raise IdPHandlingException('This account has been blocked!')
# in case of continue appears
elif 'Continue' in source:
logger.debug(u'Try to click Continue')
stab.find_elements_by_keyword(u'Continue', clickable_only=True,\
exact=False)[-1].click()
# give all possible permisson to the app
elif 'would like to' in source:
logger.debug(u'Try to offer permission by clicking OK')
stab.find_elements_by_keyword(u'OK', clickable_only=True, exact=True)[-1].click()
time.sleep(1)
assert count <= IDP_STATUS_TIMEOUT
logger.debug(u'Get out of facebook login webview')
return True
def idp_handler(self, status, account=None, password=None):
"""handler idp login process"""
logger.info(u"RP login status: %s, idp: %s", status, self.idp)
stab = Stabilizer(self.driver)
if status == "Uncertain":
if self.idp == 'sina' and stab.wait_for_activities('.SSOAuthorizeActivity'):
status = "IdpNeedConfirm"
elif self.idp == 'wechat' and stab.wait_for_keyword(u'微信登录|登录后应用将获得以下权限', timeout=60):
status = "IdpNeedConfirm"
elif self.idp == 'fb' and stab.wait_for_keyword(u'登录 Facebook 帐户', timeout=60):
status = "IdpNeedLogin"
else:
return
if status == "IdpNeedConfirm" and self.idp == 'sina':
if self.driver.current_activity == '.SSOAuthorizeActivity' and \
stab.wait_for_keyword(u'确定', timeout=60):
stab.tap_keyword(u'确定', siblings_on=False)
elif status == "IdpNeedConfirm" and self.idp == 'wechat':
if stab.wait_for_keyword(u'确认登录', timeout=60):
stab.tap_keyword(u'确认登录', siblings_on=False)
elif status == "IdpNeedLogin" and self.idp == 'fb':
self.fb_login_handler(stab, account=account, password=password)
elif status == "LoggedIn":
pass
else:
logger.warning("Cannot handle: status - %s, IdP - %s", status, self.idp)
time.sleep(RP_REQUEST_TIMEOUT)
def idp_set_session(self, idp, path):
"""
Set IdP to specific user session by file copying
:param idp: fb, sina or wechat
:param path: path to the folder or file containing session data.
For wechat, no coexisting sessions are allowed, and sessions are bind to device information.
:return: True for success
"""
# make sure adb has root permission, if not exception will be raised
self.adb.root()
# On-device location of session file for different IdP
PKG = {
'fb': 'com.facebook.lite',
'sina': 'com.sina.weibo',
'wechat': 'com.tencent.mm'
}
DST = {
'fb': '/data/data/com.facebook.lite/shared_prefs/',
'sina': '/data/data/com.sina.weibo/databases/sina_weibo',
'wechat': '/data/data/com.tencent.mm/MicroMsg/'
}
self.adb.force_stop(PKG[idp])
self.adb.rm(DST[idp])
self.adb.push(path, DST[idp])
self.adb.chmod(DST[idp])
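    # Illustrative usage sketch (the receiver name and local path below are
    # hypothetical, not taken from the original source): restoring a saved
    # Weibo session requires adb root and pushes the session data to the
    # on-device location listed in DST after force-stopping the IdP app.
    #
    #   app.idp_set_session('sina', '/tmp/sessions/sina_weibo')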
@property
def page_source(self):
"""wrapper around appium page_source to catch exception"""
source = None
e = None
for _ in range(3):
try:
source = self.driver.page_source
if source:
break
except WebDriverException as e:
time.sleep(1)
continue
else:
raise WebDriverException(e)
return source
|
py
|
1a55c50007fb059f1c428525f49e6dcab30c95a7
|
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('requirements.txt') as requirements_file:
requirements = requirements_file.read().split('\n')
setup(
author='Philipp Bode, Christian Warmuth',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description=(
'neurogaze provides wrapper functionality to record and '
'visualize eye tracking and application usage data.'
),
install_requires=requirements,
license='MIT license',
long_description=readme,
include_package_data=True,
name='neurogaze',
packages=find_packages(include=['neurogaze']),
url='https://github.com/christianwarmuth/neurogaze',
version='0.0.1',
)
|
py
|
1a55c58210d304e8a665f1470813eccb61ef9c94
|
a = int(input("Entrez a:"))
b = int(input("Entrez b:"))
def pgcd(a, b):
while b != 0:
a, b = b, a % b
return a
print("Le pgcd est:", pgcd(a, b))
|
py
|
1a55c682f6d63164f49830a07ec2a92630348140
|
# Message
def display_message(msg):
    """Print the message."""
print(msg)
display_message('你好')
|
py
|
1a55c6be5d2fa16c43e47ef90b1020367aa9d3b4
|
# coding: utf-8
import os
import re
from time import gmtime, localtime, strftime, time
from django import forms
from django.contrib import messages
from django.contrib.admin.views.decorators import staff_member_required
from django.core.files.storage import (DefaultStorage, FileSystemStorage,
default_storage)
from django.core.paginator import EmptyPage, InvalidPage, Paginator
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import HttpResponse, render
from django.template import RequestContext as Context
from django.urls import get_resolver, get_urlconf, reverse
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from filebrowser import signals
# Default actions
from filebrowser.actions import (flip_horizontal, flip_vertical,
rotate_90_clockwise,
rotate_90_counterclockwise, rotate_180)
from filebrowser.base import FileListing, FileObject
from filebrowser.decorators import file_exists, path_exists
from filebrowser.settings import (ADMIN_THUMBNAIL, ADMIN_VERSIONS,
CONVERT_FILENAME, DEFAULT_PERMISSIONS,
DEFAULT_SORTING_BY, DEFAULT_SORTING_ORDER,
DIRECTORY, EXCLUDE, EXTENSION_LIST,
EXTENSIONS, LIST_PER_PAGE, MAX_UPLOAD_SIZE,
NORMALIZE_FILENAME, OVERWRITE_EXISTING,
SEARCH_TRAVERSE, SELECT_FORMATS,
UPLOAD_TEMPDIR, VERSIONS, VERSIONS_BASEDIR)
from filebrowser.storage import FileSystemStorageMixin
from filebrowser.templatetags.fb_tags import query_helper
from filebrowser.utils import convert_filename
try:
import json
except ImportError:
from django.utils import simplejson as json
# Add some required methods to FileSystemStorage
if FileSystemStorageMixin not in FileSystemStorage.__bases__:
FileSystemStorage.__bases__ += (FileSystemStorageMixin,)
# This cache contains all *instantiated* FileBrowser sites
_sites_cache = {}
def get_site_dict(app_name='filebrowser'):
"""
Return a dict with all *deployed* FileBrowser sites that have
a given app_name.
"""
if app_name not in _sites_cache:
return {}
    # Get names of all deployed filebrowser sites with a given app_name
deployed = get_resolver(get_urlconf()).app_dict[app_name]
# Get the deployed subset from the cache
return dict((k, v) for k, v in _sites_cache[app_name].items() if k in deployed)
def register_site(app_name, site_name, site):
"""
Add a site into the site dict.
"""
if app_name not in _sites_cache:
_sites_cache[app_name] = {}
_sites_cache[app_name][site_name] = site
def get_default_site(app_name='filebrowser'):
"""
Returns the default site. This function uses Django's url resolution method to
obtain the name of the default site.
"""
# Get the name of the default site:
resolver = get_resolver(get_urlconf())
name = 'filebrowser'
# Django's default name resolution method (see django.core.urlresolvers.reverse())
app_list = resolver.app_dict[app_name]
if name not in app_list:
name = app_list[0]
return get_site_dict()[name]
def get_breadcrumbs(query, path):
"""
Get breadcrumbs.
"""
breadcrumbs = []
dir_query = ""
if path:
for item in path.split(os.sep):
dir_query = os.path.join(dir_query, item)
breadcrumbs.append([item, dir_query])
return breadcrumbs
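# Illustrative example (not part of the original module): with a POSIX path
# separator, get_breadcrumbs(query, 'uploads/2019') returns
# [['uploads', 'uploads'], ['2019', 'uploads/2019']]; note that the query
# argument is currently unused here.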
def get_filterdate(filter_date, date_time):
"""
Get filterdate.
"""
returnvalue = ''
date_year = strftime("%Y", gmtime(date_time))
date_month = strftime("%m", gmtime(date_time))
date_day = strftime("%d", gmtime(date_time))
if filter_date == 'today' and int(date_year) == int(localtime()[0]) and int(date_month) == int(localtime()[1]) and int(date_day) == int(localtime()[2]):
returnvalue = 'true'
elif filter_date == 'thismonth' and date_time >= time() - 2592000:
returnvalue = 'true'
elif filter_date == 'thisyear' and int(date_year) == int(localtime()[0]):
returnvalue = 'true'
elif filter_date == 'past7days' and date_time >= time() - 604800:
returnvalue = 'true'
elif filter_date == '':
returnvalue = 'true'
return returnvalue
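# Illustrative behaviour (not part of the original module): the helper returns
# the string 'true' when the timestamp matches the requested window and an
# empty string otherwise, e.g. get_filterdate('past7days', time() - 3600)
# evaluates to 'true' while get_filterdate('past7days', time() - 10**7)
# evaluates to ''.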
def get_settings_var(directory=DIRECTORY):
"""
Get settings variables used for FileBrowser listing.
"""
settings_var = {}
# Main
# Extensions/Formats (for FileBrowseField)
settings_var['EXTENSIONS'] = EXTENSIONS
settings_var['SELECT_FORMATS'] = SELECT_FORMATS
# Versions
settings_var['ADMIN_VERSIONS'] = ADMIN_VERSIONS
settings_var['ADMIN_THUMBNAIL'] = ADMIN_THUMBNAIL
# FileBrowser Options
settings_var['MAX_UPLOAD_SIZE'] = MAX_UPLOAD_SIZE
# Normalize Filenames
settings_var['NORMALIZE_FILENAME'] = NORMALIZE_FILENAME
# Convert Filenames
settings_var['CONVERT_FILENAME'] = CONVERT_FILENAME
# Traverse directories when searching
settings_var['SEARCH_TRAVERSE'] = SEARCH_TRAVERSE
return settings_var
def handle_file_upload(path, file, site):
"""
Handle File Upload.
"""
uploadedfile = None
try:
file_path = os.path.join(path, file.name)
uploadedfile = site.storage.save(file_path, file)
except Exception as inst:
raise inst
return uploadedfile
def filebrowser_view(view):
"Only let staff browse the files"
return staff_member_required(never_cache(view))
class FileBrowserSite(object):
"""
A filebrowser.site defines admin views for browsing your servers media files.
"""
filelisting_class = FileListing
def __init__(self, name=None, app_name='filebrowser', storage=default_storage):
self.name = name
self.app_name = app_name
self.storage = storage
self._actions = {}
self._global_actions = self._actions.copy()
# Register this site in the global site cache
register_site(self.app_name, self.name, self)
# Per-site settings:
self.directory = DIRECTORY
def _directory_get(self):
"Set directory"
return self._directory
def _directory_set(self, val):
"Get directory"
self._directory = val
directory = property(_directory_get, _directory_set)
def get_urls(self):
"URLs for a filebrowser.site"
from django.conf.urls import url
# filebrowser urls (views)
urlpatterns = [
url(r'^browse/$', path_exists(self, filebrowser_view(self.browse)), name="fb_browse"),
url(r'^createdir/', path_exists(self, filebrowser_view(self.createdir)), name="fb_createdir"),
url(r'^upload/', path_exists(self, filebrowser_view(self.upload)), name="fb_upload"),
url(r'^delete_confirm/$', file_exists(self, path_exists(self, filebrowser_view(self.delete_confirm))), name="fb_delete_confirm"),
url(r'^delete/$', file_exists(self, path_exists(self, filebrowser_view(self.delete))), name="fb_delete"),
url(r'^detail/$', file_exists(self, path_exists(self, filebrowser_view(self.detail))), name="fb_detail"),
url(r'^version/$', file_exists(self, path_exists(self, filebrowser_view(self.version))), name="fb_version"),
url(r'^upload_file/$', staff_member_required(csrf_exempt(self._upload_file)), name="fb_do_upload"),
]
return urlpatterns
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
# Check/create short description
if not hasattr(action, 'short_description'):
action.short_description = action.__name__.replace("_", " ").capitalize()
# Check/create applies-to filter
if not hasattr(action, 'applies_to'):
action.applies_to = lambda x: True
self._actions[name] = action
self._global_actions[name] = action
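    # Illustrative sketch of registering a custom action on a site instance
    # (the action name and body below are hypothetical, not part of the
    # shipped actions): the callable receives the request and the selected
    # FileObjects, and applies_to restricts which files offer it.
    #
    #   def tag_reviewed(request, fileobjects):
    #       for fileobject in fileobjects:
    #           ...  # e.g. record the review somewhere
    #
    #   tag_reviewed.short_description = 'Mark as reviewed'
    #   tag_reviewed.applies_to = lambda fileobject: fileobject.filetype == 'Image'
    #   site.add_action(tag_reviewed, name='tag_reviewed')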
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
        Explicitly get a registered global action whether it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
def applicable_actions(self, fileobject):
"""
Return a list of tuples (name, action) of actions applicable to a given fileobject.
"""
res = []
for name, action in self.actions:
if action.applies_to(fileobject):
res.append((name, action))
return res
@property
def actions(self):
"""
Get all the enabled actions as a list of (name, func). The list
is sorted alphabetically by actions names
"""
res = list(self._actions.items())
res.sort(key=lambda name_func: name_func[0])
return res
@property
def urls(self):
"filebrowser.site URLs"
return self.get_urls(), self.app_name, self.name
def browse(self, request):
"Browse Files/Directories."
filter_re = []
for exp in EXCLUDE:
filter_re.append(re.compile(exp))
# do not filter if VERSIONS_BASEDIR is being used
if not VERSIONS_BASEDIR:
for k, v in VERSIONS.items():
exp = (r'_%s(%s)$') % (k, '|'.join(EXTENSION_LIST))
filter_re.append(re.compile(exp, re.IGNORECASE))
def filter_browse(item):
"Defining a browse filter"
filtered = item.filename.startswith('.')
for re_prefix in filter_re:
if re_prefix.search(item.filename):
filtered = True
if filtered:
return False
return True
query = request.GET.copy()
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
filelisting = self.filelisting_class(
path,
filter_func=filter_browse,
sorting_by=query.get('o', DEFAULT_SORTING_BY),
sorting_order=query.get('ot', DEFAULT_SORTING_ORDER),
site=self)
files = []
if SEARCH_TRAVERSE and query.get("q"):
listing = filelisting.files_walk_filtered()
else:
listing = filelisting.files_listing_filtered()
# If we do a search, precompile the search pattern now
do_search = query.get("q")
if do_search:
re_q = re.compile(query.get("q").lower(), re.M)
filter_type = query.get('filter_type')
filter_date = query.get('filter_date')
filter_format = query.get('type')
for fileobject in listing:
# date/type filter, format filter
append = False
if (not filter_type or fileobject.filetype == filter_type) and \
(not filter_date or get_filterdate(filter_date, fileobject.date or 0)) and \
(not filter_format or filter_format in fileobject.format):
append = True
# search
if do_search and not re_q.search(fileobject.filename.lower()):
append = False
# always show folders with popups
# otherwise, one is not able to select/filter files within subfolders
if fileobject.filetype == "Folder":
append = True
# MODIFIED: Hide folders in search
if do_search:
append = False
# append
if append:
files.append(fileobject)
filelisting.results_total = len(listing)
filelisting.results_current = len(files)
p = Paginator(files, LIST_PER_PAGE)
page_nr = request.GET.get('p', '1')
try:
page = p.page(page_nr)
except (EmptyPage, InvalidPage):
page = p.page(p.num_pages)
request.current_app = self.name
return render(request, 'filebrowser/index.html', {
'p': p,
'page': page,
'filelisting': filelisting,
'query': query,
'title': _(u'FileBrowser'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': "",
'filebrowser_site': self
})
def createdir(self, request):
"Create Directory"
from filebrowser.forms import CreateDirForm
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
if request.method == 'POST':
form = CreateDirForm(path, request.POST, filebrowser_site=self)
if form.is_valid():
server_path = os.path.join(path, form.cleaned_data['name'])
try:
signals.filebrowser_pre_createdir.send(sender=request, path=server_path, name=form.cleaned_data['name'], site=self)
self.storage.makedirs(server_path)
signals.filebrowser_post_createdir.send(sender=request, path=server_path, name=form.cleaned_data['name'], site=self)
messages.add_message(request, messages.SUCCESS, _('The Folder %s was successfully created.') % form.cleaned_data['name'])
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "ot=desc,o=date", "ot,o,filter_type,filter_date,q,p")
return HttpResponseRedirect(redirect_url)
except OSError as e:
errno = e.args[0]
if errno == 13:
form.errors['name'] = forms.utils.ErrorList([_('Permission denied.')])
else:
form.errors['name'] = forms.utils.ErrorList([_('Error creating folder.')])
else:
form = CreateDirForm(path, filebrowser_site=self)
request.current_app = self.name
return render(request, 'filebrowser/createdir.html', {
'form': form,
'query': query,
'title': _(u'New Folder'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'New Folder'),
'filebrowser_site': self
})
def upload(self, request):
"Multipe File Upload."
query = request.GET
request.current_app = self.name
return render(request, 'filebrowser/upload.html', {
'query': query,
'title': _(u'Select files to upload'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'Upload'),
'filebrowser_site': self
})
def delete_confirm(self, request):
"Delete existing File/Directory."
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if fileobject.filetype == "Folder":
filelisting = self.filelisting_class(
os.path.join(path, fileobject.filename),
sorting_by=query.get('o', 'filename'),
sorting_order=query.get('ot', DEFAULT_SORTING_ORDER),
site=self)
filelisting = filelisting.files_walk_total()
if len(filelisting) > 100:
additional_files = len(filelisting) - 100
filelisting = filelisting[:100]
else:
additional_files = None
else:
filelisting = None
additional_files = None
request.current_app = self.name
return render(request, 'filebrowser/delete_confirm.html', {
'fileobject': fileobject,
'filelisting': filelisting,
'additional_files': additional_files,
'query': query,
'title': _(u'Confirm delete'),
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': _(u'Confirm delete'),
'filebrowser_site': self
})
def delete(self, request):
"Delete existing File/Directory."
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if request.GET:
try:
signals.filebrowser_pre_delete.send(sender=request, path=fileobject.path, name=fileobject.filename, site=self)
fileobject.delete_versions()
fileobject.delete()
signals.filebrowser_post_delete.send(sender=request, path=fileobject.path, name=fileobject.filename, site=self)
messages.add_message(request, messages.SUCCESS, _('Successfully deleted %s') % fileobject.filename)
except OSError:
# TODO: define error-message
pass
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "", "filename,filetype")
return HttpResponseRedirect(redirect_url)
def detail(self, request):
"""
Show detail page for a file.
Rename existing File/Directory (deletes existing Image Versions/Thumbnails).
"""
from filebrowser.forms import ChangeForm
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
if request.method == 'POST':
form = ChangeForm(request.POST, path=path, fileobject=fileobject, filebrowser_site=self)
if form.is_valid():
new_name = form.cleaned_data['name']
action_name = form.cleaned_data['custom_action']
try:
action_response = None
if action_name:
action = self.get_action(action_name)
# Pre-action signal
signals.filebrowser_actions_pre_apply.send(sender=request, action_name=action_name, fileobject=[fileobject], site=self)
                        # Call the action
action_response = action(request=request, fileobjects=[fileobject])
# Post-action signal
signals.filebrowser_actions_post_apply.send(sender=request, action_name=action_name, fileobject=[fileobject], result=action_response, site=self)
if new_name != fileobject.filename:
signals.filebrowser_pre_rename.send(sender=request, path=fileobject.path, name=fileobject.filename, new_name=new_name, site=self)
fileobject.delete_versions()
self.storage.move(fileobject.path, os.path.join(fileobject.head, new_name))
signals.filebrowser_post_rename.send(sender=request, path=fileobject.path, name=fileobject.filename, new_name=new_name, site=self)
messages.add_message(request, messages.SUCCESS, _('Renaming was successful.'))
if isinstance(action_response, HttpResponse):
return action_response
if "_continue" in request.POST:
redirect_url = reverse("filebrowser:fb_detail", current_app=self.name) + query_helper(query, "filename=" + new_name, "filename")
else:
redirect_url = reverse("filebrowser:fb_browse", current_app=self.name) + query_helper(query, "", "filename")
return HttpResponseRedirect(redirect_url)
except OSError:
form.errors['name'] = forms.utils.ErrorList([_('Error.')])
else:
form = ChangeForm(initial={"name": fileobject.filename}, path=path, fileobject=fileobject, filebrowser_site=self)
request.current_app = self.name
return render(request, 'filebrowser/detail.html', {
'form': form,
'fileobject': fileobject,
'query': query,
'title': u'%s' % fileobject.filename,
'settings_var': get_settings_var(directory=self.directory),
'breadcrumbs': get_breadcrumbs(query, query.get('dir', '')),
'breadcrumbs_title': u'%s' % fileobject.filename,
'filebrowser_site': self
})
def version(self, request):
"""
Version detail.
        This just exists in order to select a version with a filebrowser popup.
"""
query = request.GET
path = u'%s' % os.path.join(self.directory, query.get('dir', ''))
fileobject = FileObject(os.path.join(path, query.get('filename', '')), site=self)
request.current_app = self.name
return render(request, 'filebrowser/version.html', {
'fileobject': fileobject,
'query': query,
'settings_var': get_settings_var(directory=self.directory),
'filebrowser_site': self
})
def _upload_file(self, request):
"""
Upload file to the server.
If temporary is true, we upload to UPLOAD_TEMPDIR, otherwise
we upload to site.directory
"""
if request.method == "POST":
folder = request.GET.get('folder', '')
temporary = request.GET.get('temporary', '')
temp_filename = None
if len(request.FILES) == 0:
return HttpResponseBadRequest('Invalid request! No files included.')
if len(request.FILES) > 1:
return HttpResponseBadRequest('Invalid request! Multiple files included.')
filedata = list(request.FILES.values())[0]
fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("filebrowser:fb_upload", current_app=self.name))
folder = fb_uploadurl_re.sub('', folder)
# temporary upload folder should be outside self.directory
if folder == UPLOAD_TEMPDIR and temporary == "true":
path = folder
else:
path = os.path.join(self.directory, folder)
# we convert the filename before uploading in order
# to check for existing files/folders
file_name = convert_filename(filedata.name)
filedata.name = file_name
file_path = os.path.join(path, file_name)
file_already_exists = self.storage.exists(file_path)
            # construct the temporary filename by adding the upload folder, because
            # otherwise we don't have any clue whether the file has been uploaded
            # temporarily or not
if folder == UPLOAD_TEMPDIR and temporary == "true":
temp_filename = os.path.join(folder, file_name)
# Check for name collision with a directory
if file_already_exists and self.storage.isdir(file_path):
ret_json = {'success': False, 'filename': file_name}
return HttpResponse(json.dumps(ret_json))
signals.filebrowser_pre_upload.send(sender=request, path=folder, file=filedata, site=self)
uploadedfile = handle_file_upload(path, filedata, site=self)
if file_already_exists and OVERWRITE_EXISTING:
old_file = smart_str(file_path)
new_file = smart_str(uploadedfile)
self.storage.move(new_file, old_file, allow_overwrite=True)
full_path = FileObject(smart_str(old_file), site=self).path_full
else:
file_name = smart_str(uploadedfile)
filedata.name = os.path.relpath(file_name, path)
full_path = FileObject(smart_str(file_name), site=self).path_full
# set permissions
if DEFAULT_PERMISSIONS is not None:
os.chmod(full_path, DEFAULT_PERMISSIONS)
f = FileObject(smart_str(file_name), site=self)
signals.filebrowser_post_upload.send(sender=request, path=folder, file=f, site=self)
# let Ajax Upload know whether we saved it or not
ret_json = {'success': True, 'filename': f.filename, 'temp_filename': temp_filename}
return HttpResponse(json.dumps(ret_json), content_type="application/json")
storage = DefaultStorage()
# Default FileBrowser site
site = FileBrowserSite(name='filebrowser', storage=storage)
site.add_action(flip_horizontal)
site.add_action(flip_vertical)
site.add_action(rotate_90_clockwise)
site.add_action(rotate_90_counterclockwise)
site.add_action(rotate_180)
|
py
|
1a55c7188ee84f2f8a37f3d08d88c9ac2540c0c9
|
'''
Module for gathering and managing network information
'''
# Import python libs
import logging
# Import salt libs
from salt.utils.socket_util import sanitize_host
__outputter__ = {
'dig': 'txt',
'ping': 'txt',
'netstat': 'txt',
}
log = logging.getLogger(__name__)
def __virtual__():
'''
Only work on posix-like systems
'''
# Disable on Windows, a specific file module exists:
if __grains__['os'] in ('Windows',):
return False
return 'network'
def _cidr_to_ipv4_netmask(cidr_bits):
'''
Returns an IPv4 netmask
'''
netmask = ''
for n in range(4):
if n:
netmask += '.'
if cidr_bits >= 8:
netmask += '255'
cidr_bits -= 8
else:
netmask += '{0:d}'.format(256-(2**(8-cidr_bits)))
cidr_bits = 0
return netmask
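# Illustrative examples (not part of the original module):
#   >>> _cidr_to_ipv4_netmask(24)
#   '255.255.255.0'
#   >>> _cidr_to_ipv4_netmask(19)
#   '255.255.224.0'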
def _number_of_set_bits_to_ipv4_netmask(set_bits):
'''
Returns an IPv4 netmask from the integer representation of that mask.
Ex. 0xffffff00 -> '255.255.255.0'
'''
return _cidr_to_ipv4_netmask(_number_of_set_bits(set_bits))
def _number_of_set_bits(x):
'''
Returns the number of bits that are set in a 32bit int
'''
#Taken from http://stackoverflow.com/a/4912729. Many thanks!
x -= (x >> 1) & 0x55555555
x = ((x >> 2) & 0x33333333) + (x & 0x33333333)
x = ((x >> 4) + x) & 0x0f0f0f0f
x += x >> 8
x += x >> 16
return x & 0x0000003f
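# Illustrative examples (not part of the original module):
#   >>> _number_of_set_bits(0xffffff00)
#   24
#   >>> _number_of_set_bits_to_ipv4_netmask(0xffffff00)
#   '255.255.255.0'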
def _interfaces_ip(out):
'''
Uses ip to return a dictionary of interfaces with various information about
each (up/down state, ip address, netmask, and hwaddr)
'''
import re
ret = dict()
def parse_network(value, cols):
'''
Return a tuple of ip, netmask, broadcast
based on the current set of cols
'''
brd = None
if '/' in value: # we have a CIDR in this address
ip, cidr = value.split('/')
else:
ip = value
cidr = 32
if type == 'inet':
mask = _cidr_to_ipv4_netmask(int(cidr))
if 'brd' in cols:
brd = cols[cols.index('brd')+1]
elif type == 'inet6':
mask = cidr
return (ip, mask, brd)
groups = re.compile('\r?\n\d').split(out)
for group in groups:
iface = None
data = dict()
for line in group.splitlines():
if not ' ' in line:
continue
m = re.match('^\d*:\s+([\w.]+)(?:@)?(\w+)?:\s+<(.+)>', line)
if m:
iface, parent, attrs = m.groups()
if 'UP' in attrs.split(','):
data['up'] = True
else:
data['up'] = False
if parent:
data['parent'] = parent
continue
cols = line.split()
if len(cols) >= 2:
type, value = tuple(cols[0:2])
iflabel = cols[-1:][0]
if type in ('inet', 'inet6'):
if 'secondary' not in cols:
ipaddr, netmask, broadcast = parse_network(value, cols)
if type == 'inet':
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['netmask'] = netmask
addr_obj['broadcast'] = broadcast
addr_obj['label'] = iflabel
data['inet'].append(addr_obj)
elif type == 'inet6':
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = ipaddr
addr_obj['prefixlen'] = netmask
data['inet6'].append(addr_obj)
else:
if 'secondary' not in data:
data['secondary'] = list()
ip, mask, brd = parse_network(value, cols)
data['secondary'].append({
'type': type,
'address': ip,
'netmask': mask,
'broadcast': brd,
'label': iflabel,
})
del ip, mask, brd
elif type.startswith('link'):
data['hwaddr'] = value
if iface:
ret[iface] = data
del iface, data
return ret
def _interfaces_ifconfig(out):
'''
Uses ifconfig to return a dictionary of interfaces with various information
about each (up/down state, ip address, netmask, and hwaddr)
'''
import re
ret = dict()
piface = re.compile('^(\S+):?')
pmac = re.compile('.*?(?:HWaddr|ether) ([0-9a-fA-F:]+)')
pip = re.compile('.*?(?:inet addr:|inet )(.*?)\s')
pip6 = re.compile('.*?(?:inet6 addr: (.*?)/|inet6 )([0-9a-fA-F:]+)')
pmask = re.compile('.*?(?:Mask:|netmask )(?:(0x[0-9a-fA-F]{8})|([\d\.]+))')
pmask6 = re.compile('.*?(?:inet6 addr: [0-9a-fA-F:]+/(\d+)|prefixlen (\d+)).*')
pupdown = re.compile('UP')
pbcast = re.compile('.*?(?:Bcast:|broadcast )([\d\.]+)')
groups = re.compile('\r?\n(?=\S)').split(out)
for group in groups:
data = dict()
iface = ''
updown = False
for line in group.splitlines():
miface = piface.match(line)
mmac = pmac.match(line)
mip = pip.match(line)
mip6 = pip6.match(line)
mupdown = pupdown.search(line)
if miface:
iface = miface.group(1)
if mmac:
data['hwaddr'] = mmac.group(1)
if mip:
if 'inet' not in data:
data['inet'] = list()
addr_obj = dict()
addr_obj['address'] = mip.group(1)
mmask = pmask.match(line)
if mmask:
if mmask.group(1):
mmask = _number_of_set_bits_to_ipv4_netmask(
int(mmask.group(1), 16))
else:
mmask = mmask.group(2)
addr_obj['netmask'] = mmask
mbcast = pbcast.match(line)
if mbcast:
addr_obj['broadcast'] = mbcast.group(1)
data['inet'].append(addr_obj)
if mupdown:
updown = True
if mip6:
if 'inet6' not in data:
data['inet6'] = list()
addr_obj = dict()
addr_obj['address'] = mip6.group(1) or mip6.group(2)
mmask6 = pmask6.match(line)
if mmask6:
addr_obj['prefixlen'] = mmask6.group(1) or mmask6.group(2)
data['inet6'].append(addr_obj)
data['up'] = updown
ret[iface] = data
del data
return ret
def interfaces():
'''
Return a dictionary of information about all the interfaces on the minion
CLI Example::
salt '*' network.interfaces
'''
ifaces = dict()
if __salt__['cmd.has_exec']('ip'):
cmd1 = __salt__['cmd.run']('ip link show')
cmd2 = __salt__['cmd.run']('ip addr show')
ifaces = _interfaces_ip(cmd1 + '\n' + cmd2)
elif __salt__['cmd.has_exec']('ifconfig'):
cmd = __salt__['cmd.run']('ifconfig -a')
ifaces = _interfaces_ifconfig(cmd)
return ifaces
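# Sketch of the returned structure, assembled from the parsers above; the
# interface name and addresses are hypothetical, and some keys (e.g. 'label')
# only appear when the `ip` output was parsed rather than `ifconfig`:
#
#   {'eth0': {'up': True,
#             'hwaddr': '00:11:22:33:44:55',
#             'inet': [{'address': '192.168.1.42',
#                       'netmask': '255.255.255.0',
#                       'broadcast': '192.168.1.255',
#                       'label': 'eth0'}],
#             'inet6': [{'address': 'fe80::1', 'prefixlen': '64'}]}}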
def _get_net_start(ipaddr, netmask):
ipaddr_octets = ipaddr.split('.')
netmask_octets = netmask.split('.')
net_start_octets = [str(int(ipaddr_octets[x]) & int(netmask_octets[x]))
for x in range(0, 4)]
return '.'.join(net_start_octets)
def _get_net_size(mask):
binary_str = ''
for octet in mask.split('.'):
binary_str += bin(int(octet))[2:].zfill(8)
return len(binary_str.rstrip('0'))
def _calculate_subnet(ipaddr, netmask):
return '{0}/{1}'.format(_get_net_start(ipaddr, netmask),
_get_net_size(netmask))
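# Illustrative examples (not part of the original module):
#   >>> _get_net_start('192.168.1.42', '255.255.255.0')
#   '192.168.1.0'
#   >>> _get_net_size('255.255.255.0')
#   24
#   >>> _calculate_subnet('192.168.1.42', '255.255.255.0')
#   '192.168.1.0/24'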
def _ipv4_to_bits(ipaddr):
'''
Accepts an IPv4 dotted quad and returns a string representing its binary
counterpart
'''
return ''.join([bin(int(x))[2:].rjust(8, '0') for x in ipaddr.split('.')])
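# Illustrative example (not part of the original module):
#   >>> _ipv4_to_bits('127.0.0.1')
#   '01111111000000000000000000000001'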
def subnets():
'''
Returns a list of subnets to which the host belongs
'''
ifaces = interfaces()
subnets = []
for ipv4_info in ifaces.values():
for ipv4 in ipv4_info.get('inet', []):
if ipv4['address'] == '127.0.0.1': continue
network = _calculate_subnet(ipv4['address'], ipv4['netmask'])
subnets.append(network)
return subnets
def in_subnet(cidr):
'''
Returns True if host is within specified subnet, otherwise False
'''
try:
netstart, netsize = cidr.split('/')
netsize = int(netsize)
    except Exception:
log.error('Invalid CIDR \'{0}\''.format(cidr))
return False
netstart_bin = _ipv4_to_bits(netstart)
if netsize < 32 and len(netstart_bin.rstrip('0')) > netsize:
log.error('Invalid network starting IP \'{0}\' in CIDR '
'\'{1}\''.format(netstart, cidr))
return False
netstart_leftbits = netstart_bin[0:netsize]
for ip_addr in ip_addrs():
if netsize == 32:
if netstart == ip_addr: return True
else:
ip_leftbits = _ipv4_to_bits(ip_addr)[0:netsize]
if netstart_leftbits == ip_leftbits: return True
return False
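# Illustrative behaviour (not part of the original module): in_subnet compares
# the first netsize bits of every address returned by ip_addrs() against the
# network prefix, so
#   >>> in_subnet('10.0.0.0/8')    # True if any non-loopback IPv4 is 10.x.x.x
# while a CIDR with host bits set, e.g. '10.0.0.1/8', is logged as invalid and
# yields False.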
def ip_addrs(interface=None, include_loopback=False):
'''
Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
ignored, unless 'include_loopback=True' is indicated. If 'interface' is
provided, then only IP addresses from that interface will be returned.
'''
ret = []
ifaces = interfaces()
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict([(k,v) for k,v in ifaces.iteritems()
if k == interface])
if not target_ifaces:
log.error('Interface {0} not found.'.format(interface))
for ipv4_info in target_ifaces.values():
for ipv4 in ipv4_info.get('inet',[]):
if include_loopback \
or (not include_loopback and ipv4['address'] != '127.0.0.1'):
ret.append(ipv4['address'])
return ret
def ip_addrs6(interface=None, include_loopback=False):
'''
Returns a list of IPv6 addresses assigned to the host. ::1 is ignored,
unless 'include_loopback=True' is indicated. If 'interface' is provided,
then only IP addresses from that interface will be returned.
'''
ret = []
ifaces = interfaces()
if interface is None:
target_ifaces = ifaces
else:
target_ifaces = dict([(k,v) for k,v in ifaces.iteritems()
if k == interface])
if not target_ifaces:
log.error('Interface {0} not found.'.format(interface))
for ipv6_info in target_ifaces.values():
for ipv6 in ipv6_info.get('inet6',[]):
if include_loopback \
or (not include_loopback and ipv6['address'] != '::1'):
ret.append(ipv6['address'])
return ret
def ping(host):
'''
Performs a ping to a host
CLI Example::
salt '*' network.ping archlinux.org
'''
cmd = 'ping -c 4 {0}'.format(sanitize_host(host))
return __salt__['cmd.run'](cmd)
# FIXME: Does not work with: netstat 1.42 (2001-04-15) from net-tools 1.6.0 (Ubuntu 10.10)
def netstat():
'''
Return information on open ports and states
CLI Example::
salt '*' network.netstat
'''
ret = []
cmd = 'netstat -tulpnea'
out = __salt__['cmd.run'](cmd).splitlines()
for line in out:
comps = line.split()
if line.startswith('tcp'):
ret.append({
'inode': comps[7],
'local-address': comps[3],
'program': comps[8],
'proto': comps[0],
'recv-q': comps[1],
'remote-address': comps[4],
'send-q': comps[2],
'state': comps[5],
'user': comps[6]})
if line.startswith('udp'):
ret.append({
'inode': comps[6],
'local-address': comps[3],
'program': comps[7],
'proto': comps[0],
'recv-q': comps[1],
'remote-address': comps[4],
'send-q': comps[2],
'user': comps[5]})
return ret
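# Assumed column layout for one line of `netstat -tulpnea` output (netstat
# output differs between versions, as the source already warns); the sample
# values below are made up:
#   tcp  0  0  127.0.0.1:631  0.0.0.0:*  LISTEN  0  12345  678/cupsd
#   [0]proto [1]recv-q [2]send-q [3]local [4]remote [5]state [6]user [7]inode [8]program
# UDP lines carry no state column, which is why the udp branch shifts the
# user/inode/program indices down by one.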
# FIXME: This is broken on: Modern traceroute for Linux, version 2.0.14, May 10 2010 (Ubuntu 10.10)
# FIXME: traceroute is deprecated, make this fall back to tracepath
def traceroute(host):
'''
Performs a traceroute to a 3rd party host
CLI Example::
salt '*' network.traceroute archlinux.org
'''
ret = []
cmd = 'traceroute {0}'.format(sanitize_host(host))
out = __salt__['cmd.run'](cmd)
    for line in out.splitlines():
if not ' ' in line:
continue
if line.startswith('traceroute'):
continue
comps = line.split()
result = {
'count': comps[0],
'hostname': comps[1],
'ip': comps[2],
'ms1': comps[4],
'ms2': comps[6],
'ms3': comps[8],
'ping1': comps[3],
'ping2': comps[5],
'ping3': comps[7]}
ret.append(result)
return ret
def dig(host):
'''
Performs a DNS lookup with dig
CLI Example::
salt '*' network.dig archlinux.org
'''
cmd = 'dig {0}'.format(sanitize_host(host))
return __salt__['cmd.run'](cmd)
|
py
|
1a55c7ad07164f13c44efcabf514e503d434e43e
|
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
'''
RestrictedPython package.
'''
from SelectCompiler import *
from PrintCollector import PrintCollector
|