max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
falkon/models/__init__.py | mohamad-amin/falkon | 130 | 12636019 | from .falkon import Falkon
from .logistic_falkon import LogisticFalkon
from .incore_falkon import InCoreFalkon
__all__ = ("Falkon", "LogisticFalkon", "InCoreFalkon", )
|
envi/archs/msp430/const.py | rnui2k/vivisect | 716 | 12636033 | from envi.const import *
from envi import IF_NOFALL, IF_PRIV, IF_CALL, IF_BRANCH, IF_RET, IF_COND
IF_BYTE = 1<<8
# No operand instructions
nocode = [
'.word' # Something is wrong, so return the dirty word
]
# Single operand instructions
scode = [
('rrc', 0), # RRC Rotate right through carry
('swpb', 0), # SWPB Swap bytes
('rra', 0), # RRA Rotate right arithmetic
('sxt', 0), # SXT Sign extend byte to word
('push', 0), # PUSH Push value onto stack
('call', IF_CALL), # CALL Subroutine call; push PC and move source to PC
('reti', IF_NOFALL), # RETI Return from interrupt; pop SR then pop PC
]
# Jump conditions
jcode = [
('jnz', IF_BRANCH | IF_COND), # JNE/JNZ Jump if not equal/zero
('jz', IF_BRANCH | IF_COND), # JEQ/JZ Jump if equal/zero
('jnc', IF_BRANCH | IF_COND), # JNC/JLO Jump if no carry/lower
('jc', IF_BRANCH | IF_COND), # JC/JHS Jump if carry/higher or same
('jn', IF_BRANCH | IF_COND), # JN Jump if negative
('jge', IF_BRANCH | IF_COND), # JGE Jump if greater or equal
('jl', IF_BRANCH | IF_COND), # JL Jump if less
('jmp', IF_BRANCH | IF_NOFALL), # JMP Jump (unconditionally)
]
# Double operand instructions
dcode = [
'mov', # MOV Move source to destination
'add', # ADD Add source to destination
'addc', # ADDC Add source and carry to destination
'subc', # SUBC Subtract source from destination (with carry)
'sub', # SUB Subtract source from destination
'cmp', # CMP Compare (pretend to subtract) source from destination
'dadd', # Decimal add source to destination (with carry)
'bit', # BIT Test bits of source AND destination
'bic', # BIC Bit clear (dest &= ~src)
'bis', # BIS Bit set (logical OR)
'xor', # XOR Exclusive or source with destination
'and' # AND Logical AND source with destination (dest &= src)
]
# Double special operand instructions
dspcode = [
('nop', 0), # No Operation - MOV
('pop', 0), # POP stackpointer - MOV
('br', IF_BRANCH|IF_NOFALL), # Branch - MOV
('ret', IF_NOFALL), # Return - MOV
('clr', 0), # Clear destination - MOV
('rla', 0), # Shift and rotate left - ADD
('inc', 0), # Increment by one - ADD
('incd', 0), # Increment by two - ADD
('rlc', 0), # Shift and rotate left - ADDC
('adc', 0), # Adding only the carry bit - ADDC
('sbc', 0), # Subtracting only the carry bit - SUBC
('dec', 0), # Decrement by one - SUB
('decd', 0), # Decrement by two - SUB
('tst', 0), # Test - CMP
('dadc', 0), # Decimal adding only the carry bit - DADD
('clrc', 0), # Status register operation - BIC
('setc', 0), # Status register operation - BIS
('clrz', 0), # Status register operation - BIC
('setz', 0), # Status register operation - BIS
('clrn', 0), # Status register operation - BIC
('setn', 0), # Status register operation - BIS
('dint', 0), # Status register operation - BIC
('eint', 0), # Status register operation - BIC
('inv', 0), # Invert value - XOR
]
# Operand Type
SINGLE_OPCODE_TYPE = 0
JUMP_OPCODE_TYPE = 1
DOUBLE_OPCODE_TYPE = 2
SP_OPCODE_TYPE = 3
# Register Modes
REG_DIRECT = 0x0
REG_INDEX = 0x1
REG_INDIRECT = 0x2
REG_IND_AUTOINC = 0x3
JUMP_MODE = 0x4
# Masks
TEST_MASKS = 0xF000 # Test operands
SINGLE_MASKS = 0xE000 # ID single operands
JUMP_MASKS = 0xC000 # ID jumps
JUMP_OFFSET = 0x3FF # Jump offset
SOURCE_REG = 0xF # Single op reg
DSOURCE_REG = 0xF00 # Double source reg
DEST_REG = 0xF # Double dest reg
BYTE_WORD = 0x40 # Byte or word
SOURCE_ADDR_MODE = 0x30 # Addressing mode source
DEST_ADDR_MODE = 0x80 # Addressing mode destination
RETI_MASK = 0x1300 # Return 'reti'
REG_BYTE = 0x00FF # Clear the most significant byte
REG_FLAGS = 0x01FF # Clear most significant 7 bits to get Status Register flags
# Compare to get the proper Opcode
SINGLE_OPCODE = 0xF80 # Single opcode - rotate right 7
DOUBLE_OPCODE = 0xF000 # Double opcode - rotate right 12
JUMP_OPCODE = 0x1c00 # Jump condition - rotate right 10
# Sizes
BYTE = 1 # bytes
WORD = 2 # bytes
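# Hypothetical helper (an illustration only, not code from vivisect's msp430
# disassembler): for a well-formed 16-bit instruction word, the masks and shift
# amounts above are enough to pick the instruction format and look up a mnemonic.
# Operand decoding (registers, addressing modes, byte/word flag) is omitted.
def _classify_word_example(word):
    if (word & JUMP_MASKS) == 0x0000:          # top two bits clear: jump or single-operand format
        if word & 0x2000:                      # bits 15-13 == 001: jump format
            return JUMP_OPCODE_TYPE, jcode[(word & JUMP_OPCODE) >> 10][0]
        return SINGLE_OPCODE_TYPE, scode[(word & SINGLE_OPCODE) >> 7][0]  # bits 15-13 == 000
    op = (word & DOUBLE_OPCODE) >> 12          # double-operand opcodes occupy 0x4..0xF
    return DOUBLE_OPCODE_TYPE, dcode[op - 4]   # dcode[0] is 'mov' (opcode 0x4)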
|
src/OFS/metadirectives.py | rbanffy/Zope | 289 | 12636044 | from zope.configuration.fields import Bool
from zope.configuration.fields import GlobalObject
from zope.interface import Interface
from zope.schema import ASCII
from zope.security.zcml import Permission
class IDeprecatedManageAddDeleteDirective(Interface):
"""Call manage_afterAdd & co for these contained content classes.
"""
class_ = GlobalObject(
title="Class",
required=True)
class IRegisterClassDirective(Interface):
"""registerClass directive schema.
Register content with Zope 2.
"""
class_ = GlobalObject(
title='Instance Class',
description='Dotted name of the class that is registered.',
required=True)
meta_type = ASCII(
title='Meta Type',
description='A human readable unique identifier for the class.',
required=True)
permission = Permission(
title='Add Permission',
description='The permission for adding objects of this class.',
required=True)
addview = ASCII(
title='Add View ID',
description='The ID of the add view used in the ZMI. Consider this '
'required unless you know exactly what you do.',
default=None,
required=False)
icon = ASCII(
title='Icon ID',
description='The ID of the icon used in the ZMI.',
default=None,
required=False)
global_ = Bool(
title='Global scope?',
description='If "global" is False the class is only available in '
'containers that explicitly allow one of its interfaces.',
default=True,
required=False)
class IRegisterPackageDirective(Interface):
"""Registers the given Python package which at a minimum fools zope2 into
thinking of it as a zope2 product.
"""
package = GlobalObject(
title='Target package',
required=True)
initialize = GlobalObject(
title='Initialization function to invoke',
description='The dotted name of a function that will get invoked '
'with a ProductContext instance',
required=False)
|
cadence/tests/test_handle_workflow_execution_signaled.py | simkimsia/temporal-python-sdk | 141 | 12636086 | <filename>cadence/tests/test_handle_workflow_execution_signaled.py
import json
from unittest.mock import MagicMock, Mock
import pytest
from cadence.cadence_types import HistoryEvent, WorkflowExecutionSignaledEventAttributes
from cadence.decision_loop import ReplayDecider, WorkflowMethodTask, Status
from cadence.worker import Worker
from cadence.workflow import signal_method
@pytest.fixture
def workflow_instance():
return DummyWorkflow()
@pytest.fixture
def workflow_task(decider, workflow_instance):
workflow_task = WorkflowMethodTask(task_id=decider.execution_id, workflow_input=None,
worker=decider.worker, workflow_type=Mock(), decider=decider)
decider.workflow_task = workflow_task
workflow_task.workflow_instance = workflow_instance
return workflow_task
@pytest.fixture
def worker():
worker = Worker()
worker.register_workflow_implementation_type(DummyWorkflow)
return worker
@pytest.fixture
def decider(worker):
return ReplayDecider("run-id", MagicMock(), worker)
class DummyWorkflow:
@signal_method
def the_signal_method(self, name, age):
pass
def test_handle_workflow_execution_signaled(decider, workflow_task):
assert isinstance(MagicMock, object)
event = HistoryEvent()
event.workflow_execution_signaled_event_attributes = WorkflowExecutionSignaledEventAttributes()
event.workflow_execution_signaled_event_attributes.signal_name = "DummyWorkflow::the_signal_method"
event.workflow_execution_signaled_event_attributes.input = json.dumps(["bob", 28]);
decider.handle_workflow_execution_signaled(event)
assert decider.tasks
task = decider.tasks[0]
assert task.signal_name == "DummyWorkflow::the_signal_method"
assert task.signal_input == ["bob", 28]
assert task.decider == decider
assert task.task_id == "run-id"
assert task.status == Status.CREATED
|
_build/jupyter_execute/content/c5/s2/classification_tree.py | curioushruti/mlbook | 970 | 12636088 | <filename>_build/jupyter_execute/content/c5/s2/classification_tree.py
# Classification Trees
The construction of a classification tree is very similar to that of a regression tree. For a fuller description of the code below, please see the regression tree code on the previous page.
## Import packages
import numpy as np
from itertools import combinations
import matplotlib.pyplot as plt
import seaborn as sns
## Load data
penguins = sns.load_dataset('penguins')
penguins.dropna(inplace = True)
X = np.array(penguins.drop(columns = 'species'))
y = np.array(penguins['species'])
## Train-test split
np.random.seed(123)
test_frac = 0.25
test_size = int(len(y)*test_frac)
test_idxs = np.random.choice(np.arange(len(y)), test_size, replace = False)
X_train = np.delete(X, test_idxs, 0)
y_train = np.delete(y, test_idxs, 0)
X_test = X[test_idxs]
y_test = y[test_idxs]
We will build our classification tree on the {doc}`penguins </content/appendix/data>` dataset from `seaborn`. This dataset has a categorical target variable (penguin species) along with both quantitative and categorical predictors.
## 1. Helper Functions
Let's first create our loss functions. The Gini index and cross-entropy calculate the loss for a single node while the `split_loss()` function creates the weighted loss of a split.
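Concretely, if $\hat{p}_{mk}$ is the share of node $m$'s observations belonging to class $k$ (the `pmk` array in the code below), the Gini index is $\sum_k \hat{p}_{mk}(1 - \hat{p}_{mk})$ and the cross-entropy is $-\sum_k \hat{p}_{mk} \log_2 \hat{p}_{mk}$; both reach their minimum of zero at a pure node.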
## Loss Functions
def gini_index(y):
size = len(y)
classes, counts = np.unique(y, return_counts = True)
pmk = counts/size
return np.sum(pmk*(1-pmk))
def cross_entropy(y):
size = len(y)
classes, counts = np.unique(y, return_counts = True)
pmk = counts/size
return -np.sum(pmk*np.log2(pmk))
def split_loss(child1, child2, loss = cross_entropy):
return (len(child1)*loss(child1) + len(child2)*loss(child2))/(len(child1) + len(child2))
Next, let's define a few miscellaneous helper functions. As in the regression tree construction, `all_rows_equal()` checks if all of a bud's rows (observations) are equal across all predictors. If this is the case, this bud will not be split and instead becomes a terminal leaf. The second function, `possible_splits()`, returns all possible ways to divide the classes in a categorical predictor into two. Specifically, it returns all possible sets of values which can be used to funnel observations into the "left" child node. An example is given below for a predictor with four categories, $a$ through $d$. The set $\{a, b\}$, for instance, would imply observations where that predictor equals $a$ or $b$ go to the left child and other observations go to the right child. (Note that this function requires the `itertools` package).
## Helper Functions
def all_rows_equal(X):
return (X == X[0]).all()
def possible_splits(x):
L_values = []
for i in range(1, int(np.floor(len(x)/2)) + 1):
L_values.extend(list(combinations(x, i)))
return L_values
possible_splits(['a','b','c','d'])
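For these four categories, the call above returns the ten candidate left-child sets: the four singletons and the six unordered pairs.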
## 2. Helper Classes
Next, we define two classes to help our main decision tree classifier. These classes are essentially identical to those discussed in the regression tree page. The only difference is the loss function used to evaluate a split.
class Node:
def __init__(self, Xsub, ysub, ID, obs, depth = 0, parent_ID = None, leaf = True):
self.Xsub = Xsub
self.ysub = ysub
self.ID = ID
self.obs = obs
self.size = len(ysub)
self.depth = depth
self.parent_ID = parent_ID
self.leaf = leaf
class Splitter:
def __init__(self):
self.loss = np.inf
self.no_split = True
def _replace_split(self, Xsub_d, loss, d, dtype = 'quant', t = None, L_values = None):
self.loss = loss
self.d = d
self.dtype = dtype
self.t = t
self.L_values = L_values
self.no_split = False
if dtype == 'quant':
self.L_obs = self.obs[Xsub_d <= t]
self.R_obs = self.obs[Xsub_d > t]
else:
self.L_obs = self.obs[np.isin(Xsub_d, L_values)]
self.R_obs = self.obs[~np.isin(Xsub_d, L_values)]
## 3. Main Class
Finally, we create the main class for our classification tree. This again is essentially identical to the regression tree class. In addition to differing in the loss function used to evaluate splits, this tree differs from the regression tree in how it forms predictions. In regression trees, the fitted value for a test observation was the average target variable of the training observations landing in the same leaf. In the classification tree, since our target variable is categorical, we instead use the most common class among training observations landing in the same leaf.
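Equivalently, if $\hat{p}_{mk}$ again denotes the fraction of leaf $m$'s training observations in class $k$, the fitted value for a test observation routed to leaf $m$ is $\arg\max_k \hat{p}_{mk}$, which is what the `_get_leaf_modes()` method below computes.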
class DecisionTreeClassifier:
#############################
######## 1. TRAINING ########
#############################
######### FIT ##########
def fit(self, X, y, loss_func = cross_entropy, max_depth = 100, min_size = 2, C = None):
## Add data
self.X = X
self.y = y
self.N, self.D = self.X.shape
dtypes = [np.array(list(self.X[:,d])).dtype for d in range(self.D)]
self.dtypes = ['quant' if (dtype == float or dtype == int) else 'cat' for dtype in dtypes]
## Add model parameters
self.loss_func = loss_func
self.max_depth = max_depth
self.min_size = min_size
self.C = C
## Initialize nodes
self.nodes_dict = {}
self.current_ID = 0
initial_node = Node(Xsub = X, ysub = y, ID = self.current_ID, obs = np.arange(self.N), parent_ID = None)
self.nodes_dict[self.current_ID] = initial_node
self.current_ID += 1
# Build
self._build()
###### BUILD TREE ######
def _build(self):
eligible_buds = self.nodes_dict
for layer in range(self.max_depth):
## Find eligible nodes for layer iteration
eligible_buds = {ID:node for (ID, node) in self.nodes_dict.items() if
(node.leaf == True) &
(node.size >= self.min_size) &
(~all_rows_equal(node.Xsub)) &
(len(np.unique(node.ysub)) > 1)}
if len(eligible_buds) == 0:
break
## split each eligible parent
for ID, bud in eligible_buds.items():
## Find split
self._find_split(bud)
## Make split
if not self.splitter.no_split:
self._make_split()
###### FIND SPLIT ######
def _find_split(self, bud):
## Instantiate splitter
splitter = Splitter()
splitter.bud_ID = bud.ID
splitter.obs = bud.obs
## For each (eligible) predictor...
if self.C is None:
eligible_predictors = np.arange(self.D)
else:
eligible_predictors = np.random.choice(np.arange(self.D), self.C, replace = False)
for d in sorted(eligible_predictors):
Xsub_d = bud.Xsub[:,d]
dtype = self.dtypes[d]
if len(np.unique(Xsub_d)) == 1:
continue
## For each value...
if dtype == 'quant':
for t in np.unique(Xsub_d)[:-1]:
ysub_L = bud.ysub[Xsub_d <= t]
ysub_R = bud.ysub[Xsub_d > t]
loss = split_loss(ysub_L, ysub_R, loss = self.loss_func)
if loss < splitter.loss:
splitter._replace_split(Xsub_d, loss, d, 'quant', t = t)
else:
for L_values in possible_splits(np.unique(Xsub_d)):
ysub_L = bud.ysub[np.isin(Xsub_d, L_values)]
ysub_R = bud.ysub[~np.isin(Xsub_d, L_values)]
loss = split_loss(ysub_L, ysub_R, loss = self.loss_func)
if loss < splitter.loss:
splitter._replace_split(Xsub_d, loss, d, 'cat', L_values = L_values)
## Save splitter
self.splitter = splitter
###### MAKE SPLIT ######
def _make_split(self):
## Update parent node
parent_node = self.nodes_dict[self.splitter.bud_ID]
parent_node.leaf = False
parent_node.child_L = self.current_ID
parent_node.child_R = self.current_ID + 1
parent_node.d = self.splitter.d
parent_node.dtype = self.splitter.dtype
parent_node.t = self.splitter.t
parent_node.L_values = self.splitter.L_values
parent_node.L_obs, parent_node.R_obs = self.splitter.L_obs, self.splitter.R_obs
## Get X and y data for children
if parent_node.dtype == 'quant':
L_condition = parent_node.Xsub[:,parent_node.d] <= parent_node.t
else:
L_condition = np.isin(parent_node.Xsub[:,parent_node.d], parent_node.L_values)
Xchild_L = parent_node.Xsub[L_condition]
ychild_L = parent_node.ysub[L_condition]
Xchild_R = parent_node.Xsub[~L_condition]
ychild_R = parent_node.ysub[~L_condition]
## Create child nodes
child_node_L = Node(Xchild_L, ychild_L, obs = parent_node.L_obs, depth = parent_node.depth + 1,
ID = self.current_ID, parent_ID = parent_node.ID)
child_node_R = Node(Xchild_R, ychild_R, obs = parent_node.R_obs, depth = parent_node.depth + 1,
ID = self.current_ID+1, parent_ID = parent_node.ID)
self.nodes_dict[self.current_ID] = child_node_L
self.nodes_dict[self.current_ID + 1] = child_node_R
self.current_ID += 2
#############################
####### 2. PREDICTING #######
#############################
###### LEAF MODES ######
def _get_leaf_modes(self):
self.leaf_modes = {}
for node_ID, node in self.nodes_dict.items():
if node.leaf:
values, counts = np.unique(node.ysub, return_counts=True)
self.leaf_modes[node_ID] = values[np.argmax(counts)]
####### PREDICT ########
def predict(self, X_test):
# Calculate leaf modes
self._get_leaf_modes()
yhat = []
for x in X_test:
node = self.nodes_dict[0]
while not node.leaf:
if node.dtype == 'quant':
if x[node.d] <= node.t:
node = self.nodes_dict[node.child_L]
else:
node = self.nodes_dict[node.child_R]
else:
if x[node.d] in node.L_values:
node = self.nodes_dict[node.child_L]
else:
node = self.nodes_dict[node.child_R]
yhat.append(self.leaf_modes[node.ID])
return np.array(yhat)
A classification tree is built on the {doc}`penguins </content/appendix/data>` dataset. We evaluate the predictions on a test set and find that roughly 95% of observations are correctly classified.
## Build classifier
tree = DecisionTreeClassifier()
tree.fit(X_train, y_train, max_depth = 10, min_size = 10)
y_test_hat = tree.predict(X_test)
## Evaluate on test data
np.mean(y_test_hat == y_test) |
examples/s3-resource/process.py | dwolfschlaeger/guildai | 694 | 12636093 | <reponame>dwolfschlaeger/guildai<gh_stars>100-1000
import os
import click
import pandas
@click.command()
@click.option(
"--input-dir", metavar="DIR", default=".", help="Path to scan for raw data."
)
@click.option(
"--output-dir",
metavar="DIR",
default=".",
help="Path under which to save processed data.",
)
def process(input_dir, output_dir):
for root, _dirs, names in os.walk(input_dir, followlinks=True):
for name in names:
if name.endswith(".csv"):
input_path = os.path.join(root, name)
df = pandas.read_csv(input_path)
output_name = name + ".pkl"
output_path = os.path.join(output_dir, output_name)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print("Converting %s to %s" % (input_path, output_path))
df.to_pickle(output_path)
if __name__ == "__main__":
process()
|
eval_vr.py | bryant1410/HERO | 173 | 12636096 | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
run evaluation of VR
"""
import argparse
import os
from os.path import exists
from time import time
import torch
from torch.utils.data import DataLoader
import numpy as np
from tqdm import tqdm
import pprint
from apex import amp
from horovod import torch as hvd
from data import (VrFullEvalDataset, vr_full_eval_collate,
VrVideoOnlyFullEvalDataset,
PrefetchLoader, MsrvttQueryTokLmdb,
video_collate)
from load_data import (
get_video_ids, load_video_sub_dataset,
load_video_only_dataset)
from data.loader import move_to_cuda
from model.vr import HeroForVr
from utils.logger import LOGGER
from utils.const import VFEAT_DIM, VCMR_IOU_THDS
from utils.tvr_standalone_eval import eval_retrieval
from utils.distributed import all_gather_list
from utils.misc import Struct
from utils.basic_utils import (
load_json, save_json)
from utils.tvr_eval_utils import get_submission_top_n
def main(opts):
hvd.init()
n_gpu = hvd.size()
device = torch.device("cuda", hvd.local_rank())
torch.cuda.set_device(hvd.local_rank())
rank = hvd.rank()
LOGGER.info("device: {} n_gpu: {}, rank: {}, "
"16-bits training: {}".format(
device, n_gpu, hvd.rank(), opts.fp16))
if hvd.rank() != 0:
LOGGER.disabled = True
hps_file = f'{opts.output_dir}/log/hps.json'
model_opts = Struct(load_json(hps_file))
model_config = f'{opts.output_dir}/log/model_config.json'
# load DBs and image dirs
video_ids = get_video_ids(opts.query_txt_db)
if opts.task != "msrvtt_video_only":
video_db = load_video_sub_dataset(
opts.vfeat_db, opts.sub_txt_db, model_opts.vfeat_interval,
model_opts)
else:
txt_meta = load_json(
os.path.join(opts.query_txt_db, "meta.json"))
video_db = load_video_only_dataset(
opts.vfeat_db, txt_meta,
model_opts.vfeat_interval,
model_opts)
assert opts.split in opts.query_txt_db
q_txt_db = MsrvttQueryTokLmdb(opts.query_txt_db, -1)
if opts.task != "msrvtt_video_only":
inf_dataset = VrFullEvalDataset
else:
inf_dataset = VrVideoOnlyFullEvalDataset
eval_dataset = inf_dataset(
video_ids, video_db, q_txt_db,
distributed=model_opts.distributed_eval)
# Prepare model
if exists(opts.checkpoint):
ckpt_file = opts.checkpoint
else:
ckpt_file = f'{opts.output_dir}/ckpt/model_step_{opts.checkpoint}.pt'
checkpoint = torch.load(ckpt_file)
img_pos_embed_weight_key = (
"v_encoder.f_encoder.img_embeddings" +
".position_embeddings.weight")
assert img_pos_embed_weight_key in checkpoint
max_frm_seq_len = len(checkpoint[img_pos_embed_weight_key])
model = HeroForVr.from_pretrained(
model_config,
state_dict=checkpoint,
vfeat_dim=VFEAT_DIM,
max_frm_seq_len=max_frm_seq_len,
lw_neg_ctx=model_opts.lw_neg_ctx,
lw_neg_q=model_opts.lw_neg_q,
ranking_loss_type=model_opts.ranking_loss_type,
use_hard_negative=False,
hard_pool_size=model_opts.hard_pool_size,
margin=model_opts.margin,
use_all_neg=model_opts.use_all_neg)
model.to(device)
if opts.fp16:
model = amp.initialize(model, enabled=opts.fp16, opt_level='O2')
eval_dataloader = DataLoader(eval_dataset, batch_size=opts.batch_size,
num_workers=opts.n_workers,
pin_memory=opts.pin_mem,
collate_fn=vr_full_eval_collate)
eval_dataloader = PrefetchLoader(eval_dataloader)
_, results = validate_full_vr(
model, eval_dataloader, opts.split, opts, model_opts)
result_dir = f'{opts.output_dir}/results_{opts.split}'
if not exists(result_dir) and rank == 0:
os.makedirs(result_dir)
all_results_list = all_gather_list(results)
if hvd.rank() == 0:
all_results = {"video2idx": all_results_list[0]["video2idx"]}
for rank_id in range(hvd.size()):
for key, val in all_results_list[rank_id].items():
if key == "video2idx":
continue
if key not in all_results:
all_results[key] = []
all_results[key].extend(all_results_list[rank_id][key])
LOGGER.info('All results joined......')
save_json(
all_results,
f'{result_dir}/results_{opts.checkpoint}_all.json')
LOGGER.info('All results written......')
@torch.no_grad()
def validate_full_vr(model, val_loader, split, opts, model_opts):
LOGGER.info("start running full VR evaluation"
f"on {opts.task} {split} split...")
model.eval()
n_ex = 0
st = time()
val_log = {}
has_gt_target = True # MSRVTT test set has annotations
try:
video2idx_global = val_loader.dataset.vid2idx[split]
except Exception:
video2idx_global = val_loader.dataset.vid2idx
video_ids = sorted(list(video2idx_global.keys()))
video2idx_local = {e: i for i, e in enumerate(video_ids)}
query_data = val_loader.dataset.query_data
partial_query_data = []
total_frame_embeddings = None
video_batch, video_idx = [], []
max_clip_len = 0
for video_i, (vid, vidx) in tqdm(enumerate(video2idx_local.items()),
desc="Computing Video Embeddings",
total=len(video2idx_local)):
video_item = val_loader.dataset.video_db[vid]
video_batch.append(video_item)
video_idx.append(vidx)
if len(video_batch) == opts.vr_eval_video_batch_size or\
video_i == len(video2idx_local) - 1:
video_batch = move_to_cuda(video_collate(video_batch))
# Safeguard fp16
for k, item in video_batch.items():
if isinstance(item, torch.Tensor) and\
item.dtype == torch.float32:
video_batch[k] = video_batch[k].to(
dtype=next(model.parameters()).dtype)
curr_frame_embeddings = model.v_encoder(video_batch, 'repr')
curr_c_attn_masks = video_batch['c_attn_masks']
curr_clip_len = curr_frame_embeddings.size(-2)
assert curr_clip_len <= model_opts.max_clip_len
if total_frame_embeddings is None:
feat_dim = curr_frame_embeddings.size(-1)
total_frame_embeddings = torch.zeros(
(len(video2idx_local), model_opts.max_clip_len, feat_dim),
dtype=curr_frame_embeddings.dtype,
device=curr_frame_embeddings.device)
total_c_attn_masks = torch.zeros(
(len(video2idx_local), model_opts.max_clip_len),
dtype=curr_c_attn_masks.dtype,
device=curr_frame_embeddings.device)
indices = torch.LongTensor(video_idx)
total_frame_embeddings[indices, :curr_clip_len] =\
curr_frame_embeddings
total_c_attn_masks[indices, :curr_clip_len] =\
curr_c_attn_masks
max_clip_len = max(max_clip_len, curr_clip_len)
video_batch, video_idx = [], []
total_frame_embeddings = total_frame_embeddings[:, :max_clip_len, :]
total_c_attn_masks = total_c_attn_masks[:, :max_clip_len]
sorted_q2c_indices, sorted_q2c_scores = None, None
total_qids, total_vids = [], []
for batch in tqdm(val_loader, desc="Computing q2vScores"):
qids = batch['qids']
vids = batch['vids']
del batch['targets']
del batch['qids']
del batch['vids']
total_qids.extend(qids)
total_vids.extend(vids)
for qid in qids:
# fix msrvtt query data to have tvr format
gt = query_data[qid]
gt["desc_id"] = qid
gt["vid_name"] = gt["clip_name"]
partial_query_data.append(gt)
# Safeguard fp16
for k, item in batch.items():
if isinstance(item, torch.Tensor) and item.dtype == torch.float32:
batch[k] = batch[k].to(
dtype=next(model.parameters()).dtype)
# FIXME
_q2video_scores = model.get_pred_from_raw_query(
total_frame_embeddings, total_c_attn_masks, **batch,
cross=True, val_gather_gpus=False)
n_ex += len(qids)
_q2video_scores = _q2video_scores.float()
q2video_scores = _q2video_scores
_sorted_q2c_scores, _sorted_q2c_indices = \
torch.topk(q2video_scores, model_opts.max_vr_video,
dim=1, largest=True)
if sorted_q2c_indices is None:
sorted_q2c_indices = _sorted_q2c_indices.cpu().numpy()
sorted_q2c_scores = _sorted_q2c_scores.cpu().numpy()
else:
sorted_q2c_indices = np.concatenate(
(sorted_q2c_indices, _sorted_q2c_indices.cpu().numpy()),
axis=0)
sorted_q2c_scores = np.concatenate(
(sorted_q2c_scores, _sorted_q2c_scores.cpu().numpy()),
axis=0)
vr_res = []
for vr_i, (_sorted_q2c_scores_row, _sorted_q2c_indices_row) in tqdm(
enumerate(
zip(sorted_q2c_scores[:, :100],
sorted_q2c_indices[:, :100])),
desc="[VR] Loop over queries to generate predictions",
total=len(total_qids)):
cur_vr_redictions = []
for v_score, v_meta_idx in zip(_sorted_q2c_scores_row,
_sorted_q2c_indices_row):
video_idx = video2idx_global[video_ids[v_meta_idx]]
cur_vr_redictions.append([video_idx, 0, 0, float(v_score)])
cur_query_pred = dict(desc_id=total_qids[vr_i],
desc="",
predictions=cur_vr_redictions)
vr_res.append(cur_query_pred)
eval_res = dict(VR=vr_res)
eval_res = {k: v for k, v in eval_res.items() if len(v) != 0}
eval_res["video2idx"] = video2idx_global
eval_submission = get_submission_top_n(
eval_res, top_n=model_opts.max_vr_video)
if has_gt_target:
metrics = eval_retrieval(eval_submission, partial_query_data,
iou_thds=VCMR_IOU_THDS,
match_number=True,
verbose=False,
use_desc_type=False)
if model_opts.distributed_eval:
n_ex_per_rank = all_gather_list(n_ex)
metrics_per_rank = all_gather_list(metrics)
else:
n_ex_per_rank = [n_ex]
metrics_per_rank = [metrics]
n_ex = sum(n_ex_per_rank)
val_log = {}
gathered_metrics = {}
for task_type, task_metric in metrics.items():
gathered_metrics[task_type] = {}
for k in task_metric.keys():
if k == "desc_type_ratio":
continue
gathered_v = 0
for idx, n in enumerate(n_ex_per_rank):
gathered_v += n*metrics_per_rank[idx][task_type][k]
gathered_v = gathered_v / n_ex
gathered_metrics[task_type][k] = gathered_v
val_log[
f'valid_{split}_{task_type}/{task_type}_{k}'] = gathered_v
LOGGER.info("metrics_VR \n{}".format(pprint.pformat(
gathered_metrics["VR"], indent=4)))
tot_time = time()-st
val_log.update(
{f'valid/vr_{split}_ex_per_s': n_ex/tot_time})
LOGGER.info(f"validation finished in {int(tot_time)} seconds")
model.train()
return val_log, eval_submission
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--sub_txt_db",
default="/txt/msrvtt_subtitles.db",
type=str,
help="The input video subtitle corpus. (LMDB)")
parser.add_argument("--vfeat_db",
default="/video/msrvtt", type=str,
help="The input video frame features.")
parser.add_argument("--query_txt_db",
default="/txt/msrvtt_val.db",
type=str,
help="The input test query corpus. (LMDB)")
parser.add_argument("--split", choices=["val", "test"],
default="val", type=str,
help="The input query split")
parser.add_argument("--task", choices=["msrvtt_video_sub",
"msrvtt_video_only"],
default="msrvtt_video_sub", type=str,
help="The evaluation vr task")
parser.add_argument("--checkpoint",
default=None, type=str,
help="pretrained model checkpoint steps")
parser.add_argument("--batch_size",
default=80, type=int,
help="number of queries in a batch")
parser.add_argument("--vr_eval_video_batch_size",
default=50, type=int,
help="number of videos in a batch")
parser.add_argument(
"--output_dir", default=None, type=str,
help="The output directory where the model checkpoints will be "
"written.")
# device parameters
parser.add_argument('--fp16',
action='store_true',
help="Whether to use 16-bit float precision instead "
"of 32-bit")
parser.add_argument('--n_workers', type=int, default=4,
help="number of data workers")
parser.add_argument('--pin_mem', action='store_true',
help="pin memory")
args = parser.parse_args()
# options safe guard
# TODO
main(args)
|
raspi_install/download_assets.py | theendsofinvention/cartoonify | 1,991 | 12636097 | <gh_stars>1000+
import six.moves.urllib as urllib
from pathlib import Path
import jsonlines
import click
import tarfile
import os
import sys
root = Path(__file__).parent
label_map_path = root / '..' / 'cartoonify' / 'app' / 'label_mapping.jsonl'
download_path = root / '..' / 'cartoonify' / 'downloads'
quickdraw_dataset_url = 'https://storage.googleapis.com/quickdraw_dataset/full/binary/'
tensorflow_model_download_url = 'http://download.tensorflow.org/models/object_detection/'
tensorflow_model_name = 'ssd_mobilenet_v1_coco_2017_11_17'
model_path = download_path / 'detection_models' / tensorflow_model_name / 'frozen_inference_graph.pb'
def main():
download_drawing_dataset()
download_tensorflow_model()
print('finished')
def download_drawing_dataset():
try:
path = download_path / 'drawing_dataset'
with jsonlines.open(str(label_map_path), mode='r') as reader:
category_mapping = reader.read()
print('checking whether drawing files already exist...')
        drawing_categories = ['face', 't-shirt', 'pants'] + list(category_mapping.values())
missing_files = [file for file in drawing_categories if not Path(path / Path(file).with_suffix('.bin')).exists()]
if missing_files:
print('{} drawing files missing, downloading the following files: '.format(len(missing_files)))
for f in missing_files:
print(f)
download_recurse(quickdraw_dataset_url, path, missing_files)
except IOError as e:
print('label_mapping.jsonl not found')
def download_tensorflow_model():
print('checking if tensorflow model exists...')
if not model_path.exists():
print('tensorflow model missing, downloading the following file: \n {}'.format(str(model_path)))
filename = tensorflow_model_name + '.tar.gz'
opener = urllib.request.URLopener()
opener.retrieve(tensorflow_model_download_url + filename, filename)
print('extracting model from tarfile...')
tar_file = tarfile.open(filename)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, path=str(model_path.parents[1]))
def download(url, filename, path):
"""download file @ specified url and save it to path
"""
try:
if not Path(path).exists():
Path(path).mkdir()
fpath = Path(path) / filename
opener = urllib.request.URLopener()
opener.retrieve(url, str(fpath))
return fpath
except (urllib.error.HTTPError, urllib.error.URLError):
print('could not download file: {}'.format(filename))
def download_recurse(url, path, filenames):
"""download files from url
    :param str url: the url to download from, ending with a '/'
    :param str path: the directory to save the files to
    :param list filenames: list of filenames to download
"""
path = Path(path)
with click.progressbar(filenames, label='downloading drawing dataset:') as files:
for file in files:
site = url + file.replace(' ', '%20') + '.bin'
fpath = download(site, file + '.bin', path)
def load_categories(path):
files = Path(path).glob('*.bin')
categories = [f.stem for f in files]
return categories
if __name__=='__main__':
main()
sys.exit() |
setup.py | edddyeddy/cnsenti | 140 | 12636131 | <filename>setup.py
from setuptools import setup
import setuptools
setup(
    name='cnsenti',  # package name
    version='0.0.4',  # package version
    description='Chinese sentiment analysis library (cnsenti): emotion analysis and positive/negative sentiment analysis for Chinese text.',  # short description
    author='大邓',  # author
    author_email='<EMAIL>',  # email
    url='https://github.com/thunderhit/eventextraction',  # package homepage
    packages=setuptools.find_packages(),
    package_data = {'':['dictionary/hownet/*.pkl','dictionary/dutir/*.pkl']},  # pkl dictionary files under all directories
    install_requires=['jieba', 'numpy'],
    python_requires='>=3.5',
    license="MIT",
    keywords=['chinese text analysis', 'text analysis', 'sentiment', 'sentiment analysis', 'natural language processing'],
    long_description=open('README.md').read(),  # contents of the README document
    long_description_content_type="text/markdown")  # the long description is in markdown format
#py_modules = ['eventextraction.py']
|
scrapy/core/downloader/handlers/datauri.py | HyunTruth/scrapy | 9,953 | 12636138 | from w3lib.url import parse_data_uri
from scrapy.http import TextResponse
from scrapy.responsetypes import responsetypes
from scrapy.utils.decorators import defers
class DataURIDownloadHandler(object):
lazy = False
def __init__(self, settings):
super(DataURIDownloadHandler, self).__init__()
@defers
def download_request(self, request, spider):
uri = parse_data_uri(request.url)
respcls = responsetypes.from_mimetype(uri.media_type)
resp_kwargs = {}
if (issubclass(respcls, TextResponse) and
uri.media_type.split('/')[0] == 'text'):
charset = uri.media_type_parameters.get('charset')
resp_kwargs['encoding'] = charset
return respcls(url=request.url, body=uri.data, **resp_kwargs)
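    # Hypothetical usage sketch (the handler wiring is an assumption based on Scrapy's
    # default settings, which map the "data" URI scheme to this handler): a spider
    # yielding
    #     scrapy.Request("data:text/plain;charset=UTF-8,hello%20world")
    # would get back a TextResponse built from the decoded payload, with its encoding
    # taken from the charset parameter of the data URI.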
|
venv/lib/python3.6/site-packages/dirhunt/tests/test_directory_lists.py | Guillaume-Fernandez/phishfinder | 1,288 | 12636156 | <gh_stars>1000+
import unittest
from bs4 import BeautifulSoup
from dirhunt.directory_lists import ApacheDirectoryList, CommonDirectoryList
from dirhunt.processors import ProcessIndexOfRequest
from dirhunt.tests.base import CrawlerTestBase
class DirectoryListsTestBase(CrawlerTestBase):
html = ''
def get_beautiful_soup(self, html=None):
html = html or self.html
return BeautifulSoup(html, 'html.parser')
def get_processor(self):
return ProcessIndexOfRequest(None, self.get_crawler_url())
class TestApacheDirectoryLists(DirectoryListsTestBase, unittest.TestCase):
html = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<title>Index of /wp-includes</title>
</head>
<body>
<h1>Index of /wp-includes</h1>
<pre><img src="/__apache/blank.gif" alt="Icon "> <a href="?C=N;O=D">Name</a>
<a href="?C=M;O=A">Last modified</a> <a href="?C=S;O=A">Size</a>
<a href="?C=D;O=A">Description</a><hr>
<img src="/__ovh_icons/back.gif" alt="[PARENTDIR]"> <a href="/">Parent Directory</a> -
<img src="/__apache/folder.gif" alt="[DIR]"> <a href="ID3/">ID3/</a> 2015-09-15 14:58 -
<img src="/__apache/folder.gif" alt="[DIR]"> <a href="IXR/">IXR/</a> 2018-02-16 14:29 -
<img src="/__apache/unknown.gif" alt="[ ]"> <a href="author-template.php">author-template.php</a>
2018-02-16 14:29 16K
<img src="/__apache/unknown.gif" alt="[ ]"> <a href="bookmark-template.php">bookmark-template.php</a>
2018-02-16 14:29 11K
</pre>
</body></html>
"""
def test_is_applicable(self):
beautiful_soup = self.get_beautiful_soup()
self.assertTrue(ApacheDirectoryList.is_applicable(self.html, self.get_crawler_url(), beautiful_soup))
def test_is_not_applicable(self):
beautiful_soup = self.get_beautiful_soup(TestCommonDirectoryList.html)
self.assertFalse(ApacheDirectoryList.is_applicable(TestCommonDirectoryList.html,
self.get_crawler_url(), beautiful_soup))
def test_get_links(self):
directory_list = ApacheDirectoryList(self.get_processor())
links = directory_list.get_links(self.html, self.get_beautiful_soup())
test_data = [(link.url, link.extra) for link in links]
self.assertEqual(test_data, [
('http://domain.com/', {}),
('http://domain.com/path/ID3/', {'created_at': '2015-09-15 14:58'}),
('http://domain.com/path/IXR/', {'created_at': '2018-02-16 14:29'}),
('http://domain.com/path/author-template.php', {'created_at': '2018-02-16 14:29', 'filesize': '16K'}),
('http://domain.com/path/bookmark-template.php', {'created_at': '2018-02-16 14:29', 'filesize': '11K'}),
])
class TestCommonDirectoryList(DirectoryListsTestBase, unittest.TestCase):
html = """
<html><head><title>Index Of</title></head><body>
<a href="..">Top</a>
<a href="dir/">dir</a>
<a href="foo.php">foo.php</a>
<a href="error_log">error_log</a>
<a href="/spam/eggs">Eggs</a></body></html>
"""
urls = [
'http://domain.com/',
'http://domain.com/path/dir/',
'http://domain.com/path/foo.php',
'http://domain.com/path/error_log',
'http://domain.com/spam/eggs',
]
def test_process(self):
directory_list = CommonDirectoryList(self.get_processor())
links = directory_list.get_links(self.html, self.get_beautiful_soup())
urls = [link.url for link in links]
self.assertEqual(urls, self.urls)
def test_is_applicable(self):
beautiful_soup = self.get_beautiful_soup()
self.assertTrue(CommonDirectoryList.is_applicable(self.html, self.get_crawler_url(), beautiful_soup))
|
hc/api/migrations/0015_auto_20151022_1008.py | karthikprabhu/healthchecks | 4,813 | 12636172 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [("api", "0014_auto_20151019_2039")]
operations = [
migrations.AlterIndexTogether(
name="check", index_together=set([("status", "user", "alert_after")])
)
]
|
lint/queue.py | arcturus140/SublimeLinter | 646 | 12636177 | <reponame>arcturus140/SublimeLinter
import threading
MYPY = False
if MYPY:
from typing import Callable, Dict, Hashable
Key = Hashable
# Map from key to threading.Timer objects
timers = {} # type: Dict[Key, threading.Timer]
def debounce(callback, delay, key):
# type: (Callable[[], None], float, Key) -> threading.Timer
try:
timers[key].cancel()
except KeyError:
pass
timers[key] = timer = threading.Timer(delay, callback)
timer.start()
return timer
def cleanup(key):
# type: (Key) -> None
try:
timers.pop(key).cancel()
except KeyError:
pass
def unload():
while True:
try:
_key, timer = timers.popitem()
except KeyError:
return
else:
timer.cancel()
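# Hypothetical usage sketch (names like `run_lint` and `view_id` are illustrative,
# not part of this module): coalesce a burst of buffer-modified events into a single
# lint run per view, firing 0.5 s after the last event.
#
#   debounce(lambda: run_lint(view_id), delay=0.5, key=("lint", view_id))
#   cleanup(("lint", view_id))  # e.g. when the view closes, cancel any pending run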
|
mamba/setup.py | jjerphan/mamba | 2,262 | 12636230 | <filename>mamba/setup.py
# Copyright (c) 2019, QuantStack and Mamba Contributors
#
# Distributed under the terms of the BSD 3-Clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
here = os.path.dirname(os.path.abspath(__file__))
version_ns = {}
with open(os.path.join(here, "mamba", "_version.py")) as f:
exec(f.read(), {}, version_ns)
__version__ = version_ns["__version__"]
data_files = [
("etc/profile.d", ["mamba/shell_templates/mamba.sh"]),
]
if sys.platform == "win32":
data_files.append(("condabin", ["mamba/shell_templates/mamba.bat"]),)
data_files.append(
("Library/bin", ["mamba/shell_templates/win_redirect/mamba.bat"]),
)
setup(
name="mamba",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/mamba-org/mamba",
description="A fast, libsolv based solver and installer for conda packages.",
packages=["mamba"],
entry_points={"console_scripts": ["mamba = mamba.mamba:main"]},
long_description="A (hopefully faster) reimplementation of the slow bits of conda.",
install_requires=["conda", "libmambapy"],
extras_require={"test": ["pytest", "pytest-lazy-fixture"]},
data_files=data_files,
include_package_data=True,
zip_safe=False,
)
|
locations/spiders/freshmarket.py | davidchiles/alltheplaces | 297 | 12636243 | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class FreshMarketSpider(scrapy.Spider):
name = "freshmarket"
item_attributes = { 'brand': "Fresh Market" }
allowed_domains = ['thefreshmarket.com']
start_urls = (
'https://www.thefreshmarket.com/your-market/store-locator/',
)
def parse(self, response):
json_data = response.xpath('//script[@data-reactid="39"]/text()').extract_first().rstrip(';').split('=')[-1]
data = json.loads(json_data)
allStores = data['stores']['allStores']
for store in allStores:
properties = {
'name': store['storeName'],
'ref': store['storeNumber'],
'addr_full': store['address'],
'city': store['city'],
'state': store['state'],
'postcode': store['postalCode'],
'phone': store['phoneNumber'],
'website': "https://www.thefreshmarket.com/my-market/store/" + store['slug'],
'opening_hours': store['moreStoreHours'],
'lat': float(store['storeLocation']['lat']),
'lon': float(store['storeLocation']['lon']),
}
yield GeojsonPointItem(**properties) |
packages/pyright-internal/src/tests/samples/paramSpec26.py | Jasha10/pyright | 3,934 | 12636263 | # This sample tests the case where a generic class parameterized by a
# ParamSpec is specialized using a Concatenate[] type argument.
from typing import ParamSpec, Concatenate, Generic, Callable, Any
P = ParamSpec("P")
class A(Generic[P]):
def __init__(self, func: Callable[P, Any]) -> None:
...
def func1(baz: A[Concatenate[int, P]]) -> A[P]:
...
def test(a: int, b: str) -> str:
...
val1 = A(test)
reveal_type(val1, expected_text="A[(a: int, b: str)]")
val2 = func1(val1)
reveal_type(val2, expected_text="A[(b: str)]")
|
syfertext/tokenizers/default_tokenizer.py | Dat-Boi-Arjun/SyferText | 204 | 12636269 | class DefaultTokenizer:
def __init__(self, prefixes, suffixes, infixes, exceptions):
self.prefixes = prefixes
self.suffixes = suffixes
self.infixes = infixes
self.exceptions = exceptions
def __call__(self, text: str):
return text.split(" ")
|
src/pipelines/hospitalizations/xx_opencovid.py | alvarosg/covid-19-open-data | 430 | 12636279 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from pandas import DataFrame
from lib.data_source import DataSource
from lib.time import datetime_isoformat
from lib.utils import table_rename
class OpenCovidDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = table_rename(
dataframes[0],
parse_opts.get(
"column_adapter",
{
"discharged_cumulative": "total_discharged",
"hospitalized_current": "current_hospitalized",
"number hospitalised": "current_hospitalized",
"hospitalized_cumulative": "total_hospitalized",
"icu_current": "current_intensive_care",
"number in icu": "current_intensive_care",
"icu_cumulative": "cumulative_intensive_care",
"ventilator_current": "current_ventilator",
"ventilator_cumulative": "cumulative_ventilator",
"new hospital admissions": "new_hospitalized",
"new intensive care admissions": "new_intensive_care",
},
),
)
# Add key and parse date in ISO format
data["key"] = parse_opts.get("key")
data["date"] = data[parse_opts.get("date_column", "date")].astype(str)
date_format = parse_opts.get("date_format", "%Y-%m-%d")
data.date = data.date.apply(lambda x: datetime_isoformat(x, date_format))
return data
|
dirscan/dirsearch/thirdparty/oset/tests.py | imfiver/Sec-Tools | 144 | 12636284 | #!/usr/bin/env python
# -*- mode:python; tab-width: 2; coding: utf-8 -*-
"""Partially backported python ABC classes"""
import doctest
import unittest
optionflags = (
doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS | doctest.REPORT_ONLY_FIRST_FAILURE
)
TESTFILES = ["pyoset.txt"]
def test_suite():
"""Simple tes suite"""
globs = {}
try:
from pprint import pprint
globs["pprint"] = pprint
except Exception:
pass
try:
from interlude import interact
globs["interact"] = interact
except Exception:
pass
return unittest.TestSuite(
[
doctest.DocFileSuite(file, optionflags=optionflags, globs=globs)
for file in TESTFILES
]
)
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
|
scripts/glob-search.py | kasymovga/taisei | 573 | 12636291 | #!/usr/bin/env python3
from taiseilib.common import (
DirPathType,
run_main,
)
from pathlib import Path
import fnmatch
def main(args):
import argparse
parser = argparse.ArgumentParser(description='Search directory by multiple glob patterns.', prog=args[0])
parser.add_argument('directory',
type=DirPathType,
help='the directory to search'
)
parser.add_argument('patterns',
metavar='pattern',
nargs='+',
help='a glob pattern'
)
args = parser.parse_args(args[1:])
for path in (p.relative_to(args.directory) for p in args.directory.rglob('*')):
path = str(path)
for pattern in args.patterns:
if fnmatch.fnmatchcase(path, pattern):
print(path)
break
if __name__ == '__main__':
run_main(main)
|
dist/server/script/core/colorconsole.py | zhoulhb/teleport | 640 | 12636305 | <filename>dist/server/script/core/colorconsole.py
# -*- coding: utf-8 -*-
import os
import sys
import platform
import traceback
__all__ = ['o', 'v', 'i', 'w', 'e', 'f']
# ======================================
# Colors
# ======================================
CR_RESTORE = 0 # restore normal - light gray
# BOLD = "[1m" # bold / highlighted
# UNDERSCORE = "[4m" # underline
# REVERSE = "[7m" # reverse video
CR_BLACK = 1 # black
CR_LIGHT_GRAY = 2 # light gray - normal text
CR_GRAY = 3 # dark gray - captured output of other commands
CR_WHITE = 4 # white
CR_RED = 5 # red
CR_GREEN = 6 # green
CR_YELLOW = 7 # yellow - called brown on Windows
CR_BLUE = 8 # blue
CR_MAGENTA = 9 # magenta
CR_CYAN = 10 # cyan
CR_LIGHT_RED = 11 # light red - failure
CR_LIGHT_GREEN = 12 # light green - success
CR_LIGHT_YELLOW = 13 # light yellow - important
CR_LIGHT_BLUE = 14 # light blue - still fairly dark on a black background
CR_LIGHT_MAGENTA = 15 # light magenta - warning
CR_LIGHT_CYAN = 16 # light cyan
# CR_VERBOSE = CR_LIGHT_GRAY
# CR_NORMAL = CR_WHITE
# CR_INFO = CR_GREEN
# CR_WARN = CR_LIGHT_YELLOW
# CR_ERROR = CR_LIGHT_RED
CR_VERBOSE = CR_RESTORE
CR_NORMAL = CR_GRAY
CR_INFO = CR_GREEN
CR_WARN = CR_YELLOW
CR_ERROR = CR_LIGHT_RED
COLORS = {
    # constant          Linux color  WinConsole color
    CR_RESTORE: ('[0m', 7), # 7 = light gray - normal text
    CR_BLACK: ('[0;30m', 0), # 0 = black
    CR_RED: ("[0;31m", 4), # red
    CR_GREEN: ("[0;32m", 2), # green
    CR_YELLOW: ("[0;33m", 6), # yellow - called brown on Windows
    CR_BLUE: ("[0;34m", 1), # blue
    CR_MAGENTA: ("[0;35m", 5), # magenta
    CR_CYAN: ("[0;36m", 3), # cyan
    CR_LIGHT_GRAY: ('[0;37m', 7), # light gray - normal text
    CR_GRAY: ("[1;30m", 8), # dark gray - captured output of other commands
    CR_LIGHT_RED: ("[1;31m", 12), # light red - failure
    CR_LIGHT_GREEN: ("[1;32m", 10), # light green - success
    CR_LIGHT_YELLOW: ("[1;33m", 14), # light yellow - important
    CR_LIGHT_BLUE: ("[1;34m", 9), # light blue - still fairly dark on a black background
    CR_LIGHT_MAGENTA: ("[1;35m", 13), # light magenta - warning
    CR_LIGHT_CYAN: ("[1;36m", 11), # light cyan
    CR_WHITE: ("[1;37m", 15) # white
}
class ColorConsole:
"""
:type _win_color : Win32ColorConsole
"""
def __init__(self):
        # self._log_console = self._console_default  # how to write to the console; None means console output is disabled
# self._console_set_color = self._console_set_color_default
self._sep = ' '
self._end = '\n'
self._win_color = None
self.o = self._func_output
self.v = self._func_verbose
self.n = self._func_normal
self.i = self._func_info
self.w = self._func_warn
self.e = self._func_error
self.f = self._func_fail
if sys.stdout is None:
self.o = self._func_pass
self.v = self._func_pass
self.n = self._func_pass
self.i = self._func_pass
self.w = self._func_pass
self.e = self._func_pass
self.f = self._func_pass
# self._log_console = self._func_pass
# self._console_set_color = self._console_set_color_default
else:
# python2.7 on Ubuntu, sys.platform is 'linux2', so we use platform.system() instead.
_platform = platform.system().lower()
if _platform == 'linux' or _platform == 'darwin':
self._console_set_color = self._console_set_color_linux
self._console_restore_color = self._console_restore_color_linux
elif _platform == 'windows':
if 'TERM' in os.environ and os.environ['TERM'] in ['xterm']:
self._console_set_color = self._console_set_color_linux
self._console_restore_color = self._console_restore_color_linux
else:
self._win_color = Win32ColorConsole()
if self._win_color.available():
self._console_set_color = self._console_set_color_win
self._console_restore_color = self._console_restore_color_win
else:
self._console_set_color = self._func_pass
self._console_restore_color = self._func_pass
def set_default(self, *args, **kwargs):
if 'sep' in kwargs:
self._sep = kwargs['sep']
if 'end' in kwargs:
self._end = kwargs['end']
def _func_pass(self, *args, **kwargs):
# do nothing.
pass
def _func_output(self, *args, **kwargs):
sep = kwargs['sep'] if 'sep' in kwargs else self._sep
end = kwargs['end'] if 'end' in kwargs else self._end
first = True
for x in args:
if not first:
                sys.stdout.writelines(sep)
            first = False
if isinstance(x, tuple):
cl = x[0]
z = x[1:]
self._console_set_color(cl)
self._console_output(*z, sep='', end='')
sys.stdout.flush()
elif isinstance(x, str):
self._console_output(x, sep='', end='')
sys.stdout.flush()
else:
raise RuntimeError('Invalid param.')
sys.stdout.writelines(end)
self._console_restore_color()
sys.stdout.flush()
def _func_verbose(self, *args, **kwargs):
self._console_set_color(CR_VERBOSE)
self._console_output(*args, **kwargs)
self._console_restore_color()
sys.stdout.flush()
    # normal log output
def _func_normal(self, *args, **kwargs):
self._console_set_color(CR_NORMAL)
self._console_output(*args, **kwargs)
self._console_restore_color()
sys.stdout.flush()
    # important information
def _func_info(self, *args, **kwargs):
self._console_set_color(CR_INFO)
self._console_output(*args, **kwargs)
self._console_restore_color()
sys.stdout.flush()
    # warning
def _func_warn(self, *args, **kwargs):
self._console_set_color(CR_WARN)
self._console_output(*args, **kwargs)
self._console_restore_color()
sys.stdout.flush()
def _func_error(self, *args, **kwargs):
self._console_set_color(CR_ERROR)
self._console_output(*args, **kwargs)
self._console_restore_color()
sys.stdout.flush()
def _func_fail(self, *args, **kwargs):
self._console_set_color(CR_ERROR)
self._console_output('[FAIL] ', end='')
self._console_output(*args, **kwargs)
_type, _value, _tb = sys.exc_info()
if _type is not None:
x = traceback.format_exception_only(_type, _value)
self._console_output('[EXCEPTION] ', end='')
self._console_output(x[0], end='')
x = traceback.extract_tb(_tb)
c = len(x)
self._console_set_color(CR_RED)
for i in range(0, c):
self._console_output(os.path.abspath(x[i][0]), '(', x[i][1], '): ', x[i][3], sep='')
else:
s = traceback.extract_stack()
c = len(s)
self._console_set_color(CR_RED)
for i in range(2, c):
self._console_output(' ', os.path.abspath(s[c - i - 1][0]), '(', s[c - i - 1][1], '): ', s[c - i - 1][3], sep='')
self._console_restore_color()
sys.stdout.flush()
def _console_set_color_win(self, cr=None):
if cr is None:
return
self._win_color.set_color(COLORS[cr][1])
sys.stdout.flush()
def _console_set_color_linux(self, cr=None):
if cr is None:
return
sys.stdout.writelines('\x1B')
sys.stdout.writelines(COLORS[cr][0])
sys.stdout.flush()
def _console_restore_color_win(self):
self._win_color.set_color(COLORS[CR_RESTORE][1])
sys.stdout.flush()
def _console_restore_color_linux(self):
sys.stdout.writelines('\x1B[0m')
sys.stdout.flush()
def _console_output(self, *args, **kwargs):
sep = kwargs['sep'] if 'sep' in kwargs else self._sep
end = kwargs['end'] if 'end' in kwargs else self._end
first = True
for x in args:
if not first:
sys.stdout.writelines(sep)
first = False
if isinstance(x, str):
sys.stdout.writelines(x)
continue
else:
sys.stdout.writelines(x.__str__())
sys.stdout.writelines(end)
sys.stdout.flush()
def test(self):
self.o('o()......')
self.v('v()......')
self.n('n()......')
self.i('i()......')
self.w('w()......')
self.e('e()......')
self.f('f()......')
self.v('test auto\nsplit lines.\nYou should see\nmulti-lines.\n')
class Win32ColorConsole:
def __init__(self):
from ctypes import WINFUNCTYPE, windll
from ctypes.wintypes import BOOL, HANDLE, DWORD, WORD
self.__original_stderr = sys.stderr
self.__stdout = None
self.__SetConsoleTextAttribute = None
# Work around <http://bugs.python.org/issue6058>.
# codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
# Make Unicode console output work independently of the current code page.
# This also fixes <http://bugs.python.org/issue1602>.
# Credit to <NAME> <http://blogs.msdn.com/b/michkap/archive/2010/04/07/9989346.aspx>
# and TZOmegaTZIOY
# <http://stackoverflow.com/questions/878972/windows-cmd-encoding-change-causes-python-crash/1432462#1432462>.
try:
# <http://msdn.microsoft.com/en-us/library/ms683231(VS.85).aspx>
# HANDLE WINAPI GetStdHandle(DWORD nStdHandle);
# returns INVALID_HANDLE_VALUE, NULL, or a valid handle
#
# <http://msdn.microsoft.com/en-us/library/aa364960(VS.85).aspx>
# DWORD WINAPI GetFileType(DWORD hFile);
#
# <http://msdn.microsoft.com/en-us/library/ms683167(VS.85).aspx>
# BOOL WINAPI GetConsoleMode(HANDLE hConsole, LPDWORD lpMode);
STD_OUTPUT_HANDLE = DWORD(-11)
INVALID_HANDLE_VALUE = DWORD(-1).value
GetStdHandle = WINFUNCTYPE(HANDLE, DWORD)(("GetStdHandle", windll.kernel32))
self.__SetConsoleTextAttribute = WINFUNCTYPE(BOOL, HANDLE, WORD)(("SetConsoleTextAttribute", windll.kernel32))
self.__stdout = GetStdHandle(STD_OUTPUT_HANDLE)
if self.__stdout == INVALID_HANDLE_VALUE:
self.__stdout = None
except Exception as e:
self.__stdout = None
self._complain("exception %r while fixing up sys.stdout and sys.stderr\n" % (str(e),))
# If any exception occurs in this code, we'll probably try to print it on stderr,
# which makes for frustrating debugging if stderr is directed to our wrapper.
# So be paranoid about catching errors and reporting them to original_stderr,
# so that we can at least see them.
@staticmethod
def _complain(message):
# print >> self.__original_stderr, message if isinstance(message, str) else repr(message)
sys.stderr.writelines(message)
def available(self):
if self.__stdout is None or self.__SetConsoleTextAttribute is None:
return False
else:
return True
def set_color(self, color):
# if not self.available():
# return
self.__SetConsoleTextAttribute(self.__stdout, color)
_cc = ColorConsole()
del ColorConsole
# _cc.test()
def set_default(*args, **kwargs):
_cc.set_default(*args, **kwargs)
def o(*args, **kwargs):
_cc.o(*args, **kwargs)
def v(*args, **kwargs):
_cc.v(*args, **kwargs)
def n(*args, **kwargs):
_cc.n(*args, **kwargs)
def i(*args, **kwargs):
_cc.i(*args, **kwargs)
def w(*args, **kwargs):
_cc.w(*args, **kwargs)
def e(*args, **kwargs):
_cc.e(*args, **kwargs)
def f(*args, **kwargs):
_cc.f(*args, **kwargs)
|
examples/ridge_regressor_example.py | tushushu/Imilu | 407 | 12636313 | # -*- coding: utf-8 -*-
"""
@Author: tushushu
@Date: 2018-08-21 17:16:29
@Last Modified by: tushushu
@Last Modified time: 2018-08-21 17:16:29
"""
import os
os.chdir(os.path.split(os.path.realpath(__file__))[0])
import sys
sys.path.append(os.path.abspath(".."))
from imylu.linear_model.ridge import Ridge
from imylu.utils.load_data import load_boston_house_prices
from imylu.utils.model_selection import get_r2, train_test_split
from imylu.utils.preprocessing import min_max_scale
from imylu.utils.utils import run_time
@run_time
def main():
"""Tesing the performance of Ridge Regressor(stochastic)
"""
print("Tesing the performance of Ridge Regressor(stochastic)...")
# Load data
data, label = load_boston_house_prices()
data = min_max_scale(data)
# Split data randomly, train set rate 70%
data_train, data_test, label_train, label_test = train_test_split(data, label, random_state=10)
# Train model
reg = Ridge()
reg.fit(data=data_train, label=label_train, learning_rate=0.001, epochs=1000,
alpha=1e-7, method="stochastic", sample_rate=0.5, random_state=10)
# Model evaluation
get_r2(reg, data_test, label_test)
if __name__ == "__main__":
main()
|
dynamo/prediction/perturbation.py | xing-lab-pitt/dynamo-release | 236 | 12636322 | <filename>dynamo/prediction/perturbation.py
import numpy as np
from scipy.sparse import csr_matrix
import anndata
from typing import Union, Callable
from ..tools.cell_velocities import cell_velocities
from .utils import (
expr_to_pca,
pca_to_expr,
z_score,
z_score_inv,
)
from ..vectorfield.vector_calculus import (
rank_genes,
rank_cells,
rank_cell_groups,
vecfld_from_adata,
jacobian,
vector_transformation,
)
from ..vectorfield.scVectorField import vector_field_function_knockout
from ..vectorfield import SvcVectorField
from ..dynamo_logger import LoggerManager
def KO(
adata: anndata.AnnData,
KO_genes: Union[str, list],
vecfld: Union[None, Callable] = None,
vf_key: str = "VecFld",
basis: str = "pca",
emb_basis: str = "umap",
velocity_ko_wt_difference: bool = False,
add_ko_basis_key: Union[str, None] = None,
add_embedding_key: Union[str, None] = None,
store_vf_ko: bool = False,
add_vf_ko_key: Union[str, None] = None,
return_vector_field_class: bool = True,
):
"""In silico knockout genes (and thus the vector field function) and prediction of cell fate after knockout.
Parameters
----------
adata: :class:`~anndata.AnnData`
an AnnData object with the vector field function learned for the PCA basis.
KO_genes:
The gene or list of genes that will be used to perform in-silico knockout.
vecfld:
The vector field function.
vf_key:
A key to the vector field functions in adata.uns.
basis:
The basis in which the vector field function is created.
emb_basis:
The embedding basis where the perturbed (KO) vector field function will be projected to.
velocity_ko_wt_difference:
Whether to use the difference from perturbed (KO) vector field to wildtype vector field in embedding space
instead of raw perturbation (KO) vector field. Using the difference may reveal the perturbation (KO) effects more
clearly.
add_ko_basis_key:
The key name for the velocity corresponds to the `basis` name whose associated vector field is perturbed
(KO).
add_embedding_key:
The key name for the velocity corresponds to the `embedding` name to which the high dimensional perturbed
(KO) vector field will be projected to.
store_vf_ko:
Whether to store the perturbed (KO) vector field function. By default it is False.
add_vf_ko_key:
The key to store the perturbed (KO) vector field function in adata.uns.
return_vector_field_class:
Whether to return the perturbed (KO) vector field class. By default it is True.
Returns
-------
If return_vector_field_class is True, return the perturbed (KO) vector field class and update the adata object
with the perturbed (KO) vector field in both the PCA and the low dimensional space. If return_vector_field_class
is False, return nothing but update the adata object.
"""
logger = LoggerManager.gen_logger("dynamo-KO")
if basis != "pca":
logger.error("Currently we can only perturb (KO) PCA space based vector field function.")
raise ValueError()
if vecfld is None:
vf = SvcVectorField()
vf.from_adata(adata, basis=basis, vf_key=vf_key)
else:
vf = vecfld
logger.info(f"In silico knockout {KO_genes}")
KO_genes = [KO_genes] if type(KO_genes) is str else KO_genes
vf_ko = vector_field_function_knockout(adata, vf, KO_genes)
if add_ko_basis_key is None:
x_basis_key, v_basis_key = "X_" + basis + "_KO", "velocity_" + basis + "_KO"
else:
if not add_ko_basis_key.startswith("velocity_"):
raise ValueError(f"add_ko_basis_key {add_ko_basis_key} must starts with `velocity_`")
x_basis_key, v_basis_key = "X_" + add_ko_basis_key.split("velocity_")[1], add_ko_basis_key
if add_embedding_key is None:
x_emb_key, v_emb_key = "X_" + emb_basis + "_KO", "velocity_" + emb_basis + "_KO"
else:
if not add_embedding_key.startswith("velocity_"):
raise ValueError(f"add_embedding_key {add_embedding_key} must starts with `velocity_`")
x_emb_key, v_emb_key = "X_" + add_embedding_key.split("velocity_")[1], add_embedding_key
logger.info_insert_adata(x_basis_key, "obsm")
adata.obsm[x_basis_key] = adata.obsm["X_" + basis].copy()
logger.info_insert_adata(v_basis_key, "obsm")
adata.obsm[v_basis_key] = vf_ko.get_V()
logger.info_insert_adata(x_emb_key, "obsm")
adata.obsm[x_emb_key] = adata.obsm["X_" + emb_basis].copy()
logger.info(f"Project the high dimensional vector field after KO to {emb_basis}.")
cell_velocities(
adata,
X=adata.obsm["X_" + basis],
V=adata.obsm["velocity_" + basis + "_KO"],
basis=emb_basis + "_KO",
enforce=True,
add_velocity_key=v_emb_key,
)
if velocity_ko_wt_difference:
adata.obsm[v_emb_key] -= adata.obsm["velocity_" + emb_basis]
if store_vf_ko:
if add_vf_ko_key is None:
add_vf_ko_key = "vf_KO"
logger.info_insert_adata(add_vf_ko_key, "uns")
adata.uns[add_vf_ko_key] = vf_ko
if return_vector_field_class:
return vf_ko
def perturbation(
adata: anndata.AnnData,
genes: Union[str, list],
expression: Union[float, list] = 10,
perturb_mode: str = "raw",
cells: Union[list, np.ndarray, None] = None,
zero_perturb_genes_vel: bool = False,
pca_key: Union[str, np.ndarray, None] = None,
PCs_key: Union[str, np.ndarray, None] = None,
pca_mean_key: Union[str, np.ndarray, None] = None,
basis: str = "pca",
emb_basis: str = "umap",
jac_key: str = "jacobian_pca",
X_pca: Union[np.ndarray, None] = None,
delta_Y: Union[np.ndarray, None] = None,
projection_method: str = "fp",
pertubation_method: str = "j_delta_x",
J_jv_delta_t: float = 1,
delta_t: float = 1,
add_delta_Y_key: str = None,
add_transition_key: str = None,
add_velocity_key: str = None,
add_embedding_key: str = None,
):
"""In silico perturbation of single-cells and prediction of cell fate after perturbation.
To simulate genetic perturbation and its downstream effects, we take advantage of the analytical Jacobian from our
vector field function. In particular, we first calculate the perturbation velocity vector:
.. math::
\\delta Y = J \\cdot \\delta X
where J is the analytical Jacobian and \\delta X is the perturbation vector (that is,
if we overexpress gene i to expression 10 and repress gene j to -10 while keeping the others unchanged, we have
delta X = [0, 0, 0, delta x_i = 10, 0, 0, .., x_j = -10, 0, 0, 0]). Because the Jacobian encodes the instantaneous
change of the velocity of any gene after increasing any other gene, J \\cdot \\delta X will produce the perturbation
effect vector after propagating the genetic perturbation (\\delta_X) through the gene regulatory network. We then
use X_pca and \\delta_Y as a pair (just like M_s and velocity_S) to project the perturbation vector to low
dimensional space. The \\delta_Y can be also used to identify the strongest responders of the genetic perturbation.
Parameters
----------
adata: :class:`~anndata.AnnData`
an AnnData object.
genes:
The gene or list of genes that will be used to perform in-silico perturbation.
expression:
The numerical value or list of values that will be used to encode the genetic perturbation. High positive
values indicate up-regulation while low negative values indicate repression.
perturb_mode:
The mode for perturbing the gene expression vector, either `raw` or `z_score`.
cells:
The list of the cell indices that we will perform the perturbation.
zero_perturb_genes_vel:
Whether to set the perturbed genes' perturbation velocity vector values to zero.
pca_key:
The key that corresponds to pca embedding. Can also be the actual embedding matrix.
PCs_key:
The key that corresponds to PC loading embedding. Can also be the actual loading matrix.
pca_mean_key:
The key that corresponds to the mean values used for the pca projection. Can also be the actual means matrix.
basis:
The key that corresponds to the basis from which the vector field is reconstructed.
jac_key:
The key to the jacobian matrix.
X_pca:
The pca embedding matrix.
delta_Y:
The actual perturbation matrix. This argument enables more customized perturbation schemes.
projection_method:
The approach that will be used to project the high dimensional perturbation effect vector to low dimensional
space.
pertubation_method:
The approach that will be used to calculate the perturbation effect vector after in-silico genetic
perturbation. Can only be one of `"j_delta_x", "j_x_prime", "j_jv", "f_x_prime", "f_x_prime_minus_f_x_0"`
J_jv_delta_t:
If pertubation_method is `j_jv`, this will be used to determine the $\\delta x = jv \\delta t_{jv}$
delta_t:
This will be used to determine the $\\delta Y = jv \\delta t$
add_delta_Y_key:
The key that will be used to store the perturbation effect matrix. Both the pca dimension matrix (stored in
obsm) and the matrix in the original gene expression space (stored in .layers) will use this key. By default
it is None and is set to be `method + '_perturbation'`.
add_transition_key: str or None (default: None)
The dictionary key that will be used for storing the transition matrix in .obsp.
add_velocity_key: str or None (default: None)
The dictionary key that will be used for storing the low dimensional velocity projection matrix in .obsm.
add_embedding_key: str or None (default: None)
The dictionary key that will be used for storing the low dimensional velocity projection matrix in .obsm.
Returns
-------
adata: :class:`~anndata.AnnData`
Returns an updated :class:`~anndata.AnnData` with the perturbation effect matrix, projected perturbation
vectors, and a cell transition matrix based on the perturbation vectors.
"""
if pertubation_method.lower() not in ["j_delta_x", "j_x_prime", "j_jv", "f_x_prime", "f_x_prime_minus_f_x_0"]:
raise ValueError(
f"your method is set to be {pertubation_method.lower()} but must be one of `j_delta_x`, `j_x_prime`, "
"`j_jv`,`f_x_prime`, `f_x_prime_minus_f_x_0`"
)
logger = LoggerManager.get_main_logger()
logger.info(
"In silico perturbation of single-cells and prediction of cell fate after perturbation...",
)
if type(genes) == str:
genes = [genes]
if type(expression) in [int, float]:
expression = [expression]
pca_genes = adata.var_names[adata.var.use_for_pca]
valid_genes = pca_genes.intersection(genes)
if len(valid_genes) == 0:
raise ValueError("genes to perturb must be pca genes (genes used to perform the pca dimension reduction).")
if len(expression) > 1:
if len(expression) != len(valid_genes):
raise ValueError(
"if you want to set different values for different genes, you need to ensure those genes "
"are included in the pca gene list and the length of those genes is the same as that of the"
"expression."
)
if X_pca is None:
logger.info("Retrive X_pca, PCs, pca_mean...")
pca_key = "X_pca" if pca_key is None else pca_key
PCs_key = "PCs" if PCs_key is None else PCs_key
pca_mean_key = "pca_mean" if pca_mean_key is None else pca_mean_key
X_pca = adata.obsm[pca_key]
if delta_Y is None:
logger.info("Calculate perturbation effect matrix via \\delta Y = J \\dot \\delta X....")
if type(PCs_key) == np.ndarray:
PCs = PCs_key
else:
PCs = adata.uns[PCs_key]
if type(pca_mean_key) == np.ndarray:
means = pca_mean_key
else:
means = adata.uns[pca_mean_key]
# project pca gene expression back to original gene expression:
X = pca_to_expr(X_pca, PCs, means)
# get gene position
gene_loc = [adata.var_names[adata.var.use_for_pca].get_loc(i) for i in valid_genes]
# in-silico perturbation
X_perturb = X.copy()
if cells is None:
cells = np.arange(adata.n_obs)
for i, gene in enumerate(gene_loc):
if perturb_mode == "z_score":
x = X_perturb[:, gene]
_, m, s = z_score(x, 0)
X_perturb[cells, gene] = z_score_inv(expression[i], m, s)
elif perturb_mode == "raw":
X_perturb[cells, gene] = expression[i]
else:
raise NotImplementedError(f"The perturbation mode {perturb_mode} is not supported.")
# project gene expression back to pca space
X_perturb_pca = expr_to_pca(X_perturb, PCs, means)
# calculate Jacobian
if jac_key not in adata.uns_keys():
jacobian(adata, regulators=valid_genes, effectors=valid_genes)
Js = adata.uns[jac_key]["jacobian"] # pcs x pcs x cells
# calculate perturbation velocity vector: \delta Y = J \dot \delta X:
delta_Y = np.zeros_like(X_pca)
# get the actual delta_X:
if pertubation_method.lower() in ["j_delta_x", "j_x_prime", "j_jv"]:
if pertubation_method.lower() == "j_delta_x":
delta_X = X_perturb_pca - X_pca
elif pertubation_method.lower() == "j_x_prime":
delta_X = X_perturb_pca
elif pertubation_method.lower() == "j_jv":
tmp = X_perturb_pca - X_pca
delta_X = np.zeros_like(X_pca)
for i in np.arange(adata.n_obs):
delta_X[i, :] = Js[:, :, i].dot(tmp[i] * J_jv_delta_t)
for i in np.arange(adata.n_obs):
delta_Y[i, :] = Js[:, :, i].dot(delta_X[i] * delta_t)
if add_delta_Y_key is None:
add_delta_Y_key = pertubation_method + "_perturbation"
logger.info_insert_adata(add_delta_Y_key, "obsm", indent_level=1)
if pertubation_method.lower() == "f_x_prime":
_, func = vecfld_from_adata(adata, basis)
vec_mat = func(X_perturb_pca)
delta_Y = vec_mat
elif pertubation_method.lower() == "f_x_prime_minus_f_x_0":
_, func = vecfld_from_adata(adata, basis)
vec_mat = func(X_perturb_pca) - func(X_pca)
delta_Y = vec_mat
adata.obsm[add_delta_Y_key] = delta_Y
perturbation_csc = vector_transformation(delta_Y, PCs)
adata.layers[add_delta_Y_key] = csr_matrix(adata.shape, dtype=np.float64)
adata.layers[add_delta_Y_key][:, adata.var.use_for_pca] = perturbation_csc
if zero_perturb_genes_vel:
adata.layers[add_delta_Y_key][:, gene_loc] = 0
logger.info(
"project the pca perturbation vector to low dimensional space....",
)
if add_transition_key is None:
transition_key = "perturbation_transition_matrix"
else:
transition_key = add_transition_key
if add_velocity_key is None:
velocity_key, embedding_key = "velocity_" + emb_basis + "_perturbation", "X_" + emb_basis + "_perturbation"
else:
velocity_key, embedding_key = add_velocity_key, add_embedding_key
cell_velocities(
adata,
X=X_pca,
V=delta_Y,
basis=emb_basis,
enforce=True,
method=projection_method,
add_transition_key=transition_key,
add_velocity_key=velocity_key,
)
logger.info_insert_adata("X_" + emb_basis + "_perturbation", "obsm", indent_level=1)
logger.info(
f"you can use dyn.pl.streamline_plot(adata, basis='{emb_basis}_perturbation') to visualize the "
f"perturbation vector"
)
adata.obsm[embedding_key] = adata.obsm["X_" + emb_basis].copy()
def rank_perturbation_genes(adata, pkey="j_delta_x_perturbation", prefix_store="rank", **kwargs):
"""Rank genes based on their raw and absolute perturbation effects for each cell group.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object that contains the gene-wise perturbation effect vectors.
pkey: str (default: 'j_delta_x_perturbation')
The perturbation key.
prefix_store: str (default: 'rank')
The prefix added to the key for storing the returned ranking information in adata.
kwargs:
Keyword arguments passed to `vf.rank_genes`.
Returns
-------
adata: :class:`~anndata.AnnData`
AnnData object which has the rank dictionary for perturbation effects in `.uns`.
"""
rdict = rank_genes(adata, pkey, **kwargs)
rdict_abs = rank_genes(adata, pkey, abs=True, **kwargs)
adata.uns[prefix_store + "_" + pkey] = rdict
adata.uns[prefix_store + "_abs_" + pkey] = rdict_abs
return adata
def rank_perturbation_cells(adata, pkey="j_delta_x_perturbation", prefix_store="rank", **kwargs):
"""Rank cells based on their raw and absolute perturbation for each cell group.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object that contains the gene-wise perturbation effect vectors.
pkey: str (default: 'j_delta_x_perturbation')
The perturbation key.
prefix_store: str (default: 'rank')
The prefix added to the key for storing the returned ranking information in adata.
kwargs:
Keyword arguments passed to `vf.rank_cells`.
Returns
-------
adata: :class:`~anndata.AnnData`
AnnData object which has the rank dictionary for perturbation effects in `.uns`.
"""
rdict = rank_cells(adata, pkey, **kwargs)
rdict_abs = rank_cells(adata, pkey, abs=True, **kwargs)
adata.uns[prefix_store + "_" + pkey + "_cells"] = rdict
adata.uns[prefix_store + "_abs_" + pkey + "_cells"] = rdict_abs
return adata
def rank_perturbation_cell_clusters(adata, pkey="j_delta_x_perturbation", prefix_store="rank", **kwargs):
"""Rank cells based on their raw and absolute perturbation for each cell group.
Parameters
----------
adata: :class:`~anndata.AnnData`
AnnData object that contains the gene-wise perturbation effect vectors.
pkey: str (default: 'j_delta_x_perturbation')
The perturbation key.
prefix_store: str (default: 'rank')
The prefix added to the key for storing the returned ranking information in adata.
kwargs:
Keyword arguments passed to `vf.rank_cells`.
Returns
-------
adata: :class:`~anndata.AnnData`
AnnData object which has the rank dictionary for perturbation effects in `.uns`.
"""
rdict = rank_cell_groups(adata, pkey, **kwargs)
rdict_abs = rank_cell_groups(adata, pkey, abs=True, **kwargs)
adata.uns[prefix_store + "_" + pkey + "_cell_groups"] = rdict
adata.uns[prefix_store + "_abs_" + pkey + "_cells_groups"] = rdict_abs
return adata
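# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream dynamo
# source). The `perturbation` docstring above builds on the cell-wise step
# delta_Y = J . delta_X. Below is a minimal NumPy-only illustration of that
# step on randomly generated toy inputs; the shapes and variable names are
# assumptions made purely for demonstration.
if __name__ == "__main__":
    _rng = np.random.default_rng(0)
    _n_cells, _n_pcs = 5, 3
    _Js = _rng.normal(size=(_n_pcs, _n_pcs, _n_cells))  # Jacobian per cell: pcs x pcs x cells
    _delta_X = _rng.normal(size=(_n_cells, _n_pcs))     # perturbation vector per cell
    # delta_Y[i] = Js[:, :, i] @ delta_X[i]; vectorized over cells with einsum:
    _delta_Y = np.einsum("jki,ik->ij", _Js, _delta_X)
    assert _delta_Y.shape == (_n_cells, _n_pcs)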
|
ptgnn/tests/simplemodel/test_model.py | mir-am/ptgnn | 319 | 12636357 | <reponame>mir-am/ptgnn
import tempfile
import unittest
from pathlib import Path
from typing import List, Optional, Tuple
from ptgnn.baseneuralmodel import ModelTrainer
from ptgnn.tests.simplemodel.data import SampleDatapoint, SyntheticData
from ptgnn.tests.simplemodel.model import SimpleRegressionModel
class TestPytorchModel(unittest.TestCase):
"""
Model of a Boolean classifier on the sign of the dot product of input features and weights
"""
def __get_data(
self,
num_points: int,
num_features: int,
random_seed: Optional[int] = None,
train_test_split_pct=0.9,
) -> Tuple[List[SampleDatapoint], List[SampleDatapoint]]:
"""
generates data as SyntheticData = SampleDataPoint[num_points] where
SampleDataPoint<x,y> = <float[__num_features],bool>, and y= sum(x*w) >0;
weights ~ N(0,1)*10, and SampleDataPoint.x ~ N(0,1)*5.
Returns tuple of train and test data, split at @train_test_split_pct %
"""
data = SyntheticData(num_features, random_seed=random_seed)
all_data = list(data.generate(num_points))
train_test_split = int(num_points * train_test_split_pct)
training_data, validation_data = all_data[:train_test_split], all_data[train_test_split:]
return training_data, validation_data
def test_parallel(self):
self.train_and_compare_model(True)
def test_parallel_no_multiprocessing(self):
self.train_and_compare_model(True, multiprocessing=False)
def test_sequential(self):
self.train_and_compare_model(False)
def train_and_compare_model(self, parallelize: bool, multiprocessing: bool = True):
num_points = 10000
num_features = 100
max_num_epochs = 50
random_seed = 1234 # None to seed from clock
training_data, validation_data = self.__get_data(
num_points, num_features, random_seed=random_seed
)
with tempfile.TemporaryDirectory() as dir:
model_file = Path(dir) / "testModel.pkl.gz"
model = SimpleRegressionModel()
trainer = ModelTrainer(model, model_file, max_num_epochs=max_num_epochs)
trainer.train(
training_data,
validation_data,
parallelize=parallelize,
use_multiprocessing=multiprocessing,
)
model_acc_1 = model.compute_accuracy(
trainer.neural_module,
iter(validation_data),
parallelize,
use_multiprocessing=multiprocessing,
)
model, trained_network = SimpleRegressionModel.restore_model(model_file)
trained_model_acc = model.compute_accuracy(
trained_network,
iter(validation_data),
parallelize,
use_multiprocessing=multiprocessing,
)
self.assertGreater(
trained_model_acc, 0.95, f"Model achieves too low accuracy, {trained_model_acc:%}"
)
self.assertAlmostEqual(
trained_model_acc,
model_acc_1,
places=3,
msg=f"Accuracy before and after loading does not match: {trained_model_acc} vs {model_acc_1}",
)
if __name__ == "__main__":
unittest.main()
|
src/e2eflow/core/flownet.py | 3bhady/UnFlow | 281 | 12636359 | <gh_stars>100-1000
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.layers as layers
from ..ops import correlation
from .image_warp import image_warp
from .flow_util import flow_to_color
FLOW_SCALE = 5.0
def flownet(im1, im2, flownet_spec='S', full_resolution=False, train_all=False,
backward_flow=False):
num_batch, height, width, _ = tf.unstack(tf.shape(im1))
flownet_num = len(flownet_spec)
assert flownet_num > 0
flows_fw = []
flows_bw = []
for i, name in enumerate(flownet_spec):
assert name in ('C', 'c', 'S', 's')
channel_mult = 1 if name in ('C', 'S') else 3 / 8
full_res = full_resolution and i == flownet_num - 1
def scoped_block():
if name.lower() == 'c':
assert i == 0, 'FlowNetS must be used for refinement networks'
with tf.variable_scope('flownet_c_features'):
_, conv2_a, conv3_a = flownet_c_features(im1, channel_mult=channel_mult)
_, conv2_b, conv3_b = flownet_c_features(im2, channel_mult=channel_mult, reuse=True)
with tf.variable_scope('flownet_c') as scope:
flow_fw = flownet_c(conv3_a, conv3_b, conv2_a,
full_res=full_res,
channel_mult=channel_mult)
flows_fw.append(flow_fw)
if backward_flow:
scope.reuse_variables()
flow_bw = flownet_c(conv3_b, conv3_a, conv2_b,
full_res=full_res,
channel_mult=channel_mult)
flows_bw.append(flow_bw)
elif name.lower() == 's':
def _flownet_s(im1, im2, flow=None):
if flow is not None:
flow = tf.image.resize_bilinear(flow, [height, width]) * 4 * FLOW_SCALE
warp = image_warp(im2, flow)
diff = tf.abs(warp - im1)
if not train_all:
flow = tf.stop_gradient(flow)
warp = tf.stop_gradient(warp)
diff = tf.stop_gradient(diff)
inputs = tf.concat([im1, im2, flow, warp, diff], axis=3)
inputs = tf.reshape(inputs, [num_batch, height, width, 14])
else:
inputs = tf.concat([im1, im2], 3)
return flownet_s(inputs,
full_res=full_res,
channel_mult=channel_mult)
stacked = len(flows_fw) > 0
with tf.variable_scope('flownet_s') as scope:
flow_fw = _flownet_s(im1, im2, flows_fw[-1][0] if stacked else None)
flows_fw.append(flow_fw)
if backward_flow:
scope.reuse_variables()
flow_bw = _flownet_s(im2, im1, flows_bw[-1][0] if stacked else None)
flows_bw.append(flow_bw)
if i > 0:
scope_name = "stack_{}_flownet".format(i)
with tf.variable_scope(scope_name):
scoped_block()
else:
scoped_block()
if backward_flow:
return flows_fw, flows_bw
return flows_fw
def _leaky_relu(x):
with tf.variable_scope('leaky_relu'):
return tf.maximum(0.1 * x, x)
def _flownet_upconv(conv6_1, conv5_1, conv4_1, conv3_1, conv2, conv1=None, inputs=None,
channel_mult=1, full_res=False, channels=2):
m = channel_mult
flow6 = slim.conv2d(conv6_1, channels, 3, scope='flow6',
activation_fn=None)
deconv5 = slim.conv2d_transpose(conv6_1, int(512 * m), 4, stride=2,
scope='deconv5')
flow6_up5 = slim.conv2d_transpose(flow6, channels, 4, stride=2,
scope='flow6_up5',
activation_fn=None)
concat5 = tf.concat([conv5_1, deconv5, flow6_up5], 1)
flow5 = slim.conv2d(concat5, channels, 3, scope='flow5',
activation_fn=None)
deconv4 = slim.conv2d_transpose(concat5, int(256 * m), 4, stride=2,
scope='deconv4')
flow5_up4 = slim.conv2d_transpose(flow5, channels, 4, stride=2,
scope='flow5_up4',
activation_fn=None)
concat4 = tf.concat([conv4_1, deconv4, flow5_up4], 1)
flow4 = slim.conv2d(concat4, channels, 3, scope='flow4',
activation_fn=None)
deconv3 = slim.conv2d_transpose(concat4, int(128 * m), 4, stride=2,
scope='deconv3')
flow4_up3 = slim.conv2d_transpose(flow4, channels, 4, stride=2,
scope='flow4_up3',
activation_fn=None)
concat3 = tf.concat([conv3_1, deconv3, flow4_up3], 1)
flow3 = slim.conv2d(concat3, channels, 3, scope='flow3',
activation_fn=None)
deconv2 = slim.conv2d_transpose(concat3, int(64 * m), 4, stride=2,
scope='deconv2')
flow3_up2 = slim.conv2d_transpose(flow3, channels, 4, stride=2,
scope='flow3_up2',
activation_fn=None)
concat2 = tf.concat([conv2, deconv2, flow3_up2], 1)
flow2 = slim.conv2d(concat2, channels, 3, scope='flow2',
activation_fn=None)
flows = [flow2, flow3, flow4, flow5, flow6]
if full_res:
with tf.variable_scope('full_res'):
deconv1 = slim.conv2d_transpose(concat2, int(32 * m), 4, stride=2,
scope='deconv1')
flow2_up1 = slim.conv2d_transpose(flow2, channels, 4, stride=2,
scope='flow2_up1',
activation_fn=None)
concat1 = tf.concat([conv1, deconv1, flow2_up1], 1)
flow1 = slim.conv2d(concat1, channels, 3, scope='flow1',
activation_fn=None)
deconv0 = slim.conv2d_transpose(concat1, int(16 * m), 4, stride=2,
scope='deconv0')
flow1_up0 = slim.conv2d_transpose(flow1, channels, 4, stride=2,
scope='flow1_up0',
activation_fn=None)
concat0 = tf.concat([inputs, deconv0, flow1_up0], 1)
flow0 = slim.conv2d(concat0, channels, 3, scope='flow0',
activation_fn=None)
flows = [flow0, flow1] + flows
return flows
def nhwc_to_nchw(tensors):
return [tf.transpose(t, [0, 3, 1, 2]) for t in tensors]
def nchw_to_nhwc(tensors):
return [tf.transpose(t, [0, 2, 3, 1]) for t in tensors]
def flownet_s(inputs, channel_mult=1, full_res=False):
"""Given stacked inputs, returns flow predictions in decreasing resolution.
Uses FlowNetSimple.
"""
m = channel_mult
inputs = nhwc_to_nchw([inputs])[0]
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
data_format='NCHW',
weights_regularizer=slim.l2_regularizer(0.0004),
weights_initializer=layers.variance_scaling_initializer(),
activation_fn=_leaky_relu):
conv1 = slim.conv2d(inputs, int(64 * m), 7, stride=2, scope='conv1')
conv2 = slim.conv2d(conv1, int(128 * m), 5, stride=2, scope='conv2')
conv3 = slim.conv2d(conv2, int(256 * m), 5, stride=2, scope='conv3')
conv3_1 = slim.conv2d(conv3, int(256 * m), 3, stride=1, scope='conv3_1')
conv4 = slim.conv2d(conv3_1, int(512 * m), 3, stride=2, scope='conv4')
conv4_1 = slim.conv2d(conv4, int(512 * m), 3, stride=1, scope='conv4_1')
conv5 = slim.conv2d(conv4_1, int(512 * m), 3, stride=2, scope='conv5')
conv5_1 = slim.conv2d(conv5, int(512 * m), 3, stride=1, scope='conv5_1')
conv6 = slim.conv2d(conv5_1, int(1024 * m), 3, stride=2, scope='conv6')
conv6_1 = slim.conv2d(conv6, int(1024 * m), 3, stride=1, scope='conv6_1')
res = _flownet_upconv(conv6_1, conv5_1, conv4_1, conv3_1, conv2, conv1, inputs,
channel_mult=channel_mult, full_res=full_res)
return nchw_to_nhwc(res)
def flownet_c_features(im, channel_mult=1, reuse=None):
m = channel_mult
im = nhwc_to_nchw([im])[0]
with slim.arg_scope([slim.conv2d],
data_format='NCHW',
weights_regularizer=slim.l2_regularizer(0.0004),
weights_initializer=layers.variance_scaling_initializer(),
activation_fn=_leaky_relu):
conv1 = slim.conv2d(im, int(64 * m), 7, stride=2, scope='conv1', reuse=reuse)
conv2 = slim.conv2d(conv1, int(128 * m), 5, stride=2, scope='conv2', reuse=reuse)
conv3 = slim.conv2d(conv2, int(256 * m), 5, stride=2, scope='conv3', reuse=reuse)
return conv1, conv2, conv3
def flownet_c(conv3_a, conv3_b, conv2_a, channel_mult=1, full_res=False):
"""Given two images, returns flow predictions in decreasing resolution.
Uses FlowNetCorr.
"""
m = channel_mult
with slim.arg_scope([slim.conv2d, slim.conv2d_transpose],
data_format='NCHW',
weights_regularizer=slim.l2_regularizer(0.0004),
weights_initializer=layers.variance_scaling_initializer(),
activation_fn=_leaky_relu):
corr = correlation(conv3_a, conv3_b,
pad=20, kernel_size=1, max_displacement=20, stride_1=1, stride_2=2)
conv_redir = slim.conv2d(conv3_a, int(32 * m), 1, stride=1, scope='conv_redir')
conv3_1 = slim.conv2d(tf.concat([conv_redir, corr], 1), int(256 * m), 3,
stride=1, scope='conv3_1')
conv4 = slim.conv2d(conv3_1, int(512 * m), 3, stride=2, scope='conv4')
conv4_1 = slim.conv2d(conv4, int(512 * m), 3, stride=1, scope='conv4_1')
conv5 = slim.conv2d(conv4_1, int(512 * m), 3, stride=2, scope='conv5')
conv5_1 = slim.conv2d(conv5, int(512 * m), 3, stride=1, scope='conv5_1')
conv6 = slim.conv2d(conv5_1, int(1024 * m), 3, stride=2, scope='conv6')
conv6_1 = slim.conv2d(conv6, int(1024 * m), 3, stride=1, scope='conv6_1')
res = _flownet_upconv(conv6_1, conv5_1, conv4_1, conv3_1, conv2_a,
channel_mult=channel_mult, full_res=full_res)
return nchw_to_nhwc(res)
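# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for exposition; not part of the original
# module). It assumes TensorFlow 1.x with tf.contrib.slim, that this package
# is importable, and a GPU for the NCHW layers; the 384x512 input size is an
# assumption chosen so every stride-2 stage divides evenly. Spec 'S' uses a
# single FlowNetS, which avoids the custom correlation op needed by FlowNetC.
if __name__ == "__main__":
    im1 = tf.placeholder(tf.float32, [None, 384, 512, 3])
    im2 = tf.placeholder(tf.float32, [None, 384, 512, 3])
    flows_fw = flownet(im1, im2, flownet_spec='S', full_resolution=False)
    # flows_fw[0] is the prediction list of the (only) network, finest first.
    finest_flow = flows_fw[0][0]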
|
nova/tests/functional/test_flavor_extraspecs.py | zjzh/nova | 1,874 | 12636386 | <reponame>zjzh/nova
# Copyright 2020, Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for os-extra_specs API."""
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
class FlavorExtraSpecsTest(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2'
def setUp(self):
super(FlavorExtraSpecsTest, self).setUp()
self.flavor_id = self._create_flavor()
def test_create(self):
"""Test creating flavor extra specs with valid specs."""
body = {
'extra_specs': {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'},
}
self.admin_api.post_extra_spec(self.flavor_id, body)
self.assertEqual(
body['extra_specs'], self.admin_api.get_extra_specs(self.flavor_id)
)
def test_create_invalid_spec(self):
"""Test creating flavor extra specs with invalid specs.
This should pass because validation is not enabled in this API
microversion.
"""
body = {'extra_specs': {'hw:numa_nodes': 'foo', 'foo': 'bar'}}
self.admin_api.post_extra_spec(self.flavor_id, body)
self.assertEqual(
body['extra_specs'], self.admin_api.get_extra_specs(self.flavor_id)
)
def test_update(self):
"""Test updating extra specs with valid specs."""
spec_id = 'hw:numa_nodes'
body = {'hw:numa_nodes': '1'}
self.admin_api.put_extra_spec(self.flavor_id, spec_id, body)
self.assertEqual(
body, self.admin_api.get_extra_spec(self.flavor_id, spec_id)
)
def test_update_invalid_spec(self):
"""Test updating extra specs with invalid specs.
This should pass because validation is not enabled in this API
microversion.
"""
spec_id = 'hw:foo'
body = {'hw:foo': 'bar'}
self.admin_api.put_extra_spec(self.flavor_id, spec_id, body)
self.assertEqual(
body, self.admin_api.get_extra_spec(self.flavor_id, spec_id)
)
class FlavorExtraSpecsV286Test(FlavorExtraSpecsTest):
api_major_version = 'v2.1'
microversion = '2.86'
def test_create_invalid_spec(self):
"""Test creating extra specs with invalid specs."""
body = {'extra_specs': {'hw:numa_nodes': 'foo', 'foo': 'bar'}}
# this should fail because 'foo' is not a suitable value for
# 'hw:numa_nodes'
exc = self.assertRaises(
api_client.OpenStackApiException,
self.admin_api.post_extra_spec,
self.flavor_id, body,
)
self.assertEqual(400, exc.response.status_code)
# ...and the extra specs should not be saved
self.assertEqual({}, self.admin_api.get_extra_specs(self.flavor_id))
def test_create_unknown_spec(self):
"""Test creating extra specs with unknown specs."""
body = {'extra_specs': {'hw:numa_nodes': '1', 'foo': 'bar'}}
# this should pass because we don't recognize the extra spec but it's
# not in a namespace we care about
self.admin_api.post_extra_spec(self.flavor_id, body)
body = {'extra_specs': {'hw:numa_nodes': '1', 'hw:foo': 'bar'}}
# ...but this should fail because we do recognize the namespace
exc = self.assertRaises(
api_client.OpenStackApiException,
self.admin_api.post_extra_spec,
self.flavor_id, body,
)
self.assertEqual(400, exc.response.status_code)
def test_update_invalid_spec(self):
"""Test updating extra specs with invalid specs."""
spec_id = 'hw:foo'
body = {'hw:foo': 'bar'}
# this should fail because we don't recognize the extra spec
exc = self.assertRaises(
api_client.OpenStackApiException,
self.admin_api.put_extra_spec,
self.flavor_id, spec_id, body,
)
self.assertEqual(400, exc.response.status_code)
spec_id = 'hw:numa_nodes'
body = {'hw:numa_nodes': 'foo'}
# ...while this should fail because the value is not valid
exc = self.assertRaises(
api_client.OpenStackApiException,
self.admin_api.put_extra_spec,
self.flavor_id, spec_id, body,
)
self.assertEqual(400, exc.response.status_code)
# ...and neither extra spec should be saved
self.assertEqual({}, self.admin_api.get_extra_specs(self.flavor_id))
def test_update_unknown_spec(self):
"""Test updating extra specs with unknown specs."""
spec_id = 'foo:bar'
body = {'foo:bar': 'baz'}
# this should pass because we don't recognize the extra spec but it's
# not in a namespace we care about
self.admin_api.put_extra_spec(self.flavor_id, spec_id, body)
self.assertEqual(body, self.admin_api.get_extra_specs(self.flavor_id))
|
registration/urls.py | timgates42/timestrap | 1,758 | 12636389 | <reponame>timgates42/timestrap
from django.urls import path
from django.contrib.auth import views as auth_views
from .forms import TimestrapPasswordResetForm
urlpatterns = [
path(
"password_reset/",
auth_views.PasswordResetView.as_view(form_class=TimestrapPasswordResetForm),
name="password_reset",
)
]
|
recipes/Python/576531_Circle/recipe-576531.py | tdiprima/code | 2,023 | 12636402 | #On the name of ALLAH and may the blessing and peace of Allah
#be upon the Messenger of <NAME>.
#Author :<NAME>
#Date : 08/10/08
#Version : 2.4
""" Class of an equation of a circle of the form Ax^2 + Ay^2 + Dx + Ey + F = 0 (A !=0)
it represents a circle or a point or has no graph , depending of the radius value. And a class
of an equation for the circle of radius r and centred at point (x0,y0). """
import math
class Circle(object):
""" Class that represent an equation of a circle
with A,D,E,F constants properties """
def __init__(self, Avalue,Dvalue,Evalue,Fvalue):
""" Circle construction takes A,D,E,F Constants """
self.__A = float(Avalue)
self.__D = float(Dvalue)
self.__E = float(Evalue)
self.__F = float(Fvalue)
self._b = chr(253)
self._a = self._checkSign(self.__A)
self._d= self._checkSign(self.__D)
self._e = self._checkSign(self.__E)
self._f = self._checkSign(self.__F)
self._g = ((self.__D/self.__A)/2)
self._g1= self.__D/2
self._h =((self.__E/self.__A)/2)
self._h1 = self.__E/2
self._i = self._checkSign(self._g)
self._j = self._checkSign(self._h)
self._k = (-self.__F/self.__A + self._g**2 +self._h**2)
self._k1= (-self.__F + self._g1**2 +self._h1**2)
self._l = "%2.2f" % math.sqrt(abs(self._k))
self._l1 = "%2.2f" % math.sqrt(abs(self._k1))
self._m = "(x%s%s)%s+(y%s%s)%s = %s" % \
(self._i,self._g,self._b,self._j,self._h,self._b,self._k)
self._m1 = "(x%s%s)%s+(y%s%s)%s = %s" % \
(self._i,self._g1,self._b,self._j,self._h1,self._b,self._k1)
self._n = "(%s,%s)" % (-self._g,-self._h)
self._n1 = "(%s,%s)" % (-self._g1,-self._h1)
def __str__(self):
""" String representation of the circle equation,
standard form , centre and radius """
try:
math.sqrt(self._k)
#Circle raises zero degenerate case
assert math.sqrt(self._k) != 0,"The graph is the single point %s" % \
Circle.centre(self)
if self.__A == 1:
return "\n<Equation of a circle : x%s + y%s %s %sx %s %sy %s %s = 0 \
\n\n%s %35s %25s \n\n%s %22s %24s\n" %\
(self._b,self._b,self._d,int(self.D),self._e, \
int(self.E),self._f,int(self.F),
'Standard form','Centre(x0,y0)','Radius r', \
self._m1,Circle.centre(self),Circle.radius(self))
else:
return "\n<Equation of a circle : %sx%s + %sy%s %s %sx %s %sy %s %s = 0 \
\n\n%s %35s %25s \n\n%s %22s %24s\n" %\
(int(self.A),self._b,int(self.A),self._b,self._d,int(self.D),self._e, \
int(self.E),self._f,int(self.F),
'Standard form', 'Centre(x0,y0)','Radius r', \
self._m,Circle.centre(self),Circle.radius(self))
#Circle raises Negative number degenerate case
except ValueError:
raise ValueError,\
" r%s < 0 : Degenerate case has no graph" % self._b
def getA(self):
""" Get method for A attribute """
if self.__A != 0:
return self.__A
else:
raise ValueError,\
" A value should be different than zero "
def setA(self,value):
""" Set method for A attribute """
self.__A = value
def delA(self):
""" Delete method for A attribute """
del self.__A
#Create A property
A = property(getA,setA,delA,"A constant")
def getD(self):
""" Get method for D attribute """
return self.__D
def setD(self,value):
""" Set method for D attribute """
self.__D = value
def delD(self):
""" Delete method for D attribute """
del self.__D
#Create D property
D = property(getD,setD,delD,"D constant")
def getE(self):
""" Get method for E attribute """
return self.__E
def setE(self,value):
""" Set method for E attribute """
self.__E = value
def delE(self):
""" Delete method for E attribute """
del self.__E
#Create E property
E = property(getE,setE,delE,"E constant")
def getF(self):
""" Get method for F attribute """
return self.__F
def setF(self,value):
""" Set method for F attribute """
self.__F = value
def delF(self):
""" Delete method for F attribute """
del self.__F
#Create F property
F = property(getF,setF,delF,"F constant")
def _checkSign(self,value):
""" Utility method to check the values’ signs and return a sign string """
if value >= 0:
return "+"
else:
return ""
def radius(self):
""" Compute radius of a circle """
if self.__A == 1:
return self._l1
else:
return self._l
def centre(self):
""" Compute centre(x0,y0) of a circle """
if self.__A == 1:
return self._n1
else:
return self._n
class Equation(Circle):
"""Class that represent a radius and the centre of a circle """
def __init__(self,x,y,radius):
"""Equation construction takes centre(xValue,yValue
and radius"""
self.__x = float(x)
self.__y = float(y)
self.__radius = float(radius)
self._o = chr(253)
self._p = self.__radius**2
self._q = self._checkSign(-self.__x)
self._r = self._checkSign(-self.__y)
self._s = "(x%s%s)%s + (y%s%s)%s = %s " % \
(self._q,-self.__x,self._o,self._r,-self.__y,self._o,self._p)
self._t = self.__x**2 + self.__y**2 -self._p
self._u = self._checkSign(self._t)
self._v = "x%s + y%s %s %sx %s %sy %s %s = 0 " % \
(self._o,self._o,self._q,-self.__x*2,self._r,-self.__y*2,self._u,self._t)
def __str__(self):
""" String representation of the circle equation, standard form ,centre and radius """
#Equation raises radius value < 0
assert self.__radius > 0, "<Radius value should be greater than zero"
return ( "\n<Equation for the circle of radius (%s)\
centred at (%s,%s) is : \n\n%s < -- > %s" ) % \
(self.__radius,self.__x,self.__y,self._s,self._v)
if __name__ == "__main__":
circle1 = Circle(16,40,16,-7)
print circle1
#Though students might use only the radius and centre values
print circle1.radius()
print circle1.centre()
circle2 = Circle(2,24,0,-81)
print circle2
del circle2.A
circle2.A = 1
print circle2
equation = Equation(2,5,3)
print equation
for doc in (Circle.A,Circle.D,Circle.E,Circle.F):
print doc.__doc__,doc.fget.func_name,doc.fset.func_name,doc.fdel.func_name
########################################################################################
#Version : Python 3.2
#import math
#class Circle(object):
# """ Class that represent an equation of a circle
# with A,D,E,F constants properties"""
#
# def __init__(self,Avalue,Dvalue,Evalue,Fvalue):
# """ Circle constructor takes A,D,F,E constants """
#
# self.__A = float(Avalue)
# self.__D = float(Dvalue)
# self.__E = float(Evalue)
# self.__F = float(Fvalue)
#
# self._b = chr(178)
# self._a = self._checkSign(self.__A)
# self._d = self._checkSign(self.__D)
# self._e = self._checkSign(self.__E)
# self._f = self._checkSign(self.__F)
# self._g = ((self.__D/self.__A)/2)
# self._g1 = self.D/2
# self._h = ((self.__E/self.__A)/2)
# self._h1 = self.E/2
# self._i = self._checkSign(self._g)
# self._j = self._checkSign(self._h)
# self._k = (-self.__F/self.__A +self._g**2 + self._h**2)
# self._k1= (-self.__F +self._g1**2 + self._h1**2)
# self._l = "%2.2f" % math.sqrt(abs(self._k))
# self._l1= "%2.2f" % math.sqrt(abs(self._k1))
# self._m = "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._i,self._g,self._b,self._j,self._h,self._b,self._k)
# self._m1 ="(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._i,self._g1,self._b,self._j,self._h1,self._b,self._k1)
# self._n = "(%s,%s)" % (-self._g,-self._h)
# self._n1= "(%s,%s)" % (-self._g1,-self._h1)
#
#
# def squared(self):
# self._w =(-self.__F/self.__A +((self.__D/self.__A)/2)**2 + ((self.__E/self.__A)/2)**2)
# return self._w
# def standardForm(self):
# return "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._checkSign(((self.__D/self.__A)/2)),((self.__D/self.__A)/2),chr(178),self._checkSign(((self.__E/self.__A)/2)),((self.__E/self.__A)/2),chr(178),(-self.__F/self.__A +((self.__D/self.__A)/2)**2 + ((self.__E/self.__A)/2)**2))
#
# def __str__(self):
# """ String representation of the circle equation,
# standard form, centre and radius"""
#
# try:
# math.sqrt(Circle.squared(self))
#
# #Circle raises zero degenerate case
# assert math.sqrt(Circle.squared(self)) != 0,"The graph is the single point %s" % \
# Circle.centre(self)
# if self.__A == 1:
#
# return "\n<Equation of a circle : x%s + y%s %s %sx %s %sy %s %s = 0 \
# \n\n%s %35s %25s \n\n%s %22s %24s\n" %\
# (self._b,self._b,self._d,int(self.D),self._e,\
# int(self.E),self._f,int(self.F),
# "Standard form","Center(x0,y0)","Radius r",\
# self._m1,Circle.centre(self),Circle.radius(self))
# else:
# return "\n<Equation of a circle : %sx%s + %sy%s %s %sx %s %sy %s %s = 0 \
# \n\n%s %35s %25s \n\n%s %22s %24s\n" %\
# (int(self.A),self._b,int(self.A),self._b,self._d,int(self.D),self._e,\
# int(self.E),self._f,int(self.F),
# "Standard form","Center(x0,y0)","Radius r",\
# Circle.standardForm(self),Circle.centre(self),Circle.radius(self))
#
# #Circle raises Negative number degenerate case
# except ValueError:
# raise ValueError("r%s < 0 : Degenerate case has no graph" % self._b)
#
# def getA(self):
# """ Get method for A attribute """
# if self.__A !=0:
# return self.__A
# else:
# raise ValueError("A value should be differtent than zero")
#
# def setA(self,value):
# """ Set method for A attribute """
#
# self.__A = value
#
# def delA(self):
# """Delete method for A attrobute"""
#
# del self.__A
#
# #Create a property
# A = property(getA,setA,delA,"A constant")
#
# def getD(self):
# """ Get method for D attribute """
#
# return self.__D
#
# def setD(self,value):
# """ Set method for D attribute """
#
# self.__D = value
#
# def delD(self):
# """Delete method for D attrobute"""
# del self.__D
#
# #Create a property
# D = property(getD,setD,delD,"D constant")
# def getE(self):
# """ Get method for E attribute """
# return self.__E
#
# def setE(self,value):
# """ Set method for E attribute """
#
# self.__E = value
#
# def delE(self):
# """Delete method for E attrobute"""
#
# del self.__E
#
# #Create a property
# E = property(getE,setE,delE,"E constant")
#
# def getF(self):
# """ Get method for F attribute """
#
# return self.__F
#
# def setF(self,value):
# """ Set method for F attribute """
#
# self.__F = value
#
# def delF(self):
# """Delete method for F attrobute"""
#
# del self.__F
#
# #Create a property
# F = property(getF,setF,delF,"F constant")
#
# def _checkSign(self,value):
# """ Utility method to check the values's sign
# and return a sign string"""
#
# if value >= 0:
# return "+"
# else :
# return ""
#
# def radius(self):
# """ Computes radius of a circle """
# if self.__A ==1:
# return self._l1
# else:
# return "%2.2f" % math.sqrt(abs(Circle.squared(self)))
#
# def centre(self):
# """ Computes centre(x0,y0) of a circle """
# if self.__A == 1:
# return self._n1
# else:
# return "(%s,%s)" % (-((self.__D/self.__A)/2),-((self.__E/self.__A)/2))
#
#
#
#class Equation(Circle):
# """ class that represent a radius and the centre of a circle """
#
# def __init__(self,x,y,radius):
# """ Equation construction takes centre(xValue,yValue)
# and radius """
#
# self.__x = float(x)
# self.__y = float(y)
# self.__radius = float(radius)
#
# self._o = chr(178)
# self._p = self.__radius**2
# self._q = self._checkSign(-self.__x)
# self._r = self._checkSign(-self.__y)
# self._s = "(x%s%s)%s+(y%s%s)%s = %s" % \
# (self._q,-self.__x,self._o,self._r,-self.__y,self._o,self._p)
# self._t = self.__x**2 + self.__y**2 - self._p
# self._u = self._checkSign(self._t)
# self._v = "x%s + y%s %s%sx %s%sy %s%s = 0" % \
# (self._o,self._o,self._q,-self.__x*2,self._r,-self.__y*2,self._u,self._t)
#
# def __str__(self):
# """ String representation of the circle equation, standard form,
# centre and radius"""
#
# #Equation raises radius value < 0
# assert self.__radius > 0, "<radius value should be greater than zero"
#
# return ("\n<Equation for the circle of radius (%s)\
# centred at(%s,%s) is :\n\n%s <--> %s") %\
# (self.__radius,self.__x,self.__y,self._s,self._v )
#
#
#if __name__ == "__main__":
# circle1 = Circle(10,40,16,-7)
# print(circle1)
#
# print(circle1.radius())
# print(circle1.centre())
# circle1.delA
# circle1.A=1
# print(circle1)
# circle3 = Circle(5,24,0,-81)
# print(circle3)
#
# circle3.E =80
# print(circle3)
#
# equation = Equation(2,5,3)
# print(equation)
#
#
# for doc in (Circle.A,Circle.D,Circle.E,Circle.F):
# print(doc.__doc__,"=",doc.fget.__name__,doc.fset.__name__,doc.fdel.__name__)
#######################################################################################
#<Equation of a circle : 10x² + 10y² + 40x + 16y -7 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.0)²+(y+0.8)² = 5.34 (-2.0,-0.8) 2.31
#2.31
#(-2.0,-0.8)
#<Equation of a circle : x² + y² + 40x + 16y -7 = 0
#Standard form Center(x0,y0) Radius r
#(x+20.0)²+(y+8.0)² = 471.0 (-20.0,-8.0) 21.70
#<Equation of a circle : 5x² + 5y² + 24x + 0y -81 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.4)²+(y+0.0)² = 21.96 (-2.4,-0.0) 4.69
#<Equation of a circle : 5x² + 5y² + 24x + 80y -81 = 0
#Standard form Center(x0,y0) Radius r
#(x+2.4)²+(y+8.0)² = 85.96 (-2.4,-8.0) 9.27
#<Equation for the circle of radius (3.0) centred at(2.0,5.0) is :
#(x-2.0)²+(y-5.0)² = 9.0 <--> x² + y² -4.0x -10.0y +20.0 = 0
#A constant = getA setA delA
#D constant = getD setD delD
#E constant = getE setE delE
#F constant = getF setF delF
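#---------------------------------------------------------------------------
#Illustrative sketch (added for exposition; not part of the original recipe).
#It shows the completing-the-square arithmetic that the Circle class wraps,
#for Ax^2 + Ay^2 + Dx + Ey + F = 0, using the same constants as circle1 above
#and only the math module (no class machinery). Variable names are chosen for
#the demonstration only.
_A, _D, _E, _F = 16.0, 40.0, 16.0, -7.0
_x0, _y0 = -_D / (2 * _A), -_E / (2 * _A)            #centre (x0, y0)
_r_squared = _x0 ** 2 + _y0 ** 2 - _F / _A           #radius squared
_r = math.sqrt(_r_squared) if _r_squared > 0 else None   #here: centre (-1.25,-0.5), radius 1.5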
|
php_opcache/tests/test_php_opcache.py | divyamamgai/integrations-extras | 158 | 12636428 | <gh_stars>100-1000
import pytest
from datadog_checks.base import ConfigurationError
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.php_opcache import PhpOpcacheCheck
from .common import EXPECTED_METRICS
@pytest.mark.unit
def test_config():
instance = {}
c = PhpOpcacheCheck('php_opcache', {}, [instance])
with pytest.raises(ConfigurationError):
c.check(instance)
c.check({'url': 'http://foobar'})
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_service_check(aggregator, instance):
c = PhpOpcacheCheck('php_opcache', {}, [instance])
c.check(instance)
aggregator.assert_service_check('php_opcache.can_connect', PhpOpcacheCheck.OK)
aggregator.reset()
instance['url'] = instance['url'].replace('.php', '')
c.check(instance)
aggregator.assert_service_check('php_opcache.can_connect', PhpOpcacheCheck.CRITICAL)
@pytest.mark.integration
@pytest.mark.usefixtures('dd_environment')
def test_metrics(aggregator, instance):
c = PhpOpcacheCheck('php_opcache', {}, [instance])
c.check(instance)
for k, v in EXPECTED_METRICS.items():
aggregator.assert_metric(k, at_least=v)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_submission_type=True)
|
recipes/Python/578139_Metronome_For_Beginner_Musicians/recipe-578139.py | tdiprima/code | 2,023 | 12636441 | # Metronome3x.py
#
# DEMO simple metronome that exploits a minor flaw in the /dev/audio and /dev/dsp devices inside Linux.
# It can tick at around 30 to 400 beats per minute. This minimal code can be improved upon to give
# greater accuracy, range and appearance on screen if required.
#
# Original copyright, (C)2007-2012, B.Walker, G0LCU. Now issued as Public Domain and you may do with
# it as you please.
#
# There is a small flaw that uses the Ctrl-C part of the code. I'll let the big guns tell you users
# that can't find it what it is. It is not a bug as such, but it is a flaw.
#
# Tested on an HP Notebook with Debian 6.0.0 and Python 3.1.3 and an Acer Aspire One Netbook with
# PCLinuxOS 2009 and Python 3.2.1.
# To run just type:-
#
# >>> exec(open("/absolute/path/to/Metronome3x.py").read())<RETURN/ENTER>
#
# And away you go...
#
# $VER: Metronome3x.py_Version_0.00.10_(C)2007-2012_B.Walker_G0LCU.
#
# Enjoy finding simple solutions to often very difficult problems...
# The only import(s) for this DEMO...
import time
import os
def main():
while 1:
# the _variable_ listing...
# "n" is throw away integer number and purposely reused.
# "beatstring" is the inputted string and is also reused.
# "beat" is the floating point number from about 0.x to 1.x generated from the inputted data.
#
# The standard Linux clear screen cmmand.
n=os.system("clear")
# Set up a basic user screen/window.
print("\nPython 3.x.x simple metronome for the Linux platform.\n")
print("(C)2007-2012, B.Walker, G0LCU. Issued as Public Domain.\n")
beatstring=input("Enter any whole number from 30 to 400 (bpm), (QUIT or EXIT to Quit):- ")
# Allow a means of quitting the DEMO.
if beatstring=="QUIT" or beatstring=="EXIT": break
# Don't allow any errors...
if len(beatstring)>=4: beatstring="100"
if len(beatstring)<=1: beatstring="100"
n=0
while n<=(len(beatstring)-1):
if beatstring[n]>=chr(48) and beatstring[n]<=chr(57): n=n+1
else: beatstring="100"
n=int(beatstring)
if n<=30: n=30
if n>=400: n=400
# Convert this integer "n" back to the "beatstring" string...
beatstring=str(n)
# Now convert to the floating point value for the time.sleep() function.
beat=((60/n)-0.125)
print("\nApproximate beats per minute = "+beatstring+"...\n")
print("Press Ctrl-C to enter another speed...")
while 1:
# Write directly to the /dev/dsp device.
try:
audio=open("/dev/dsp", "wb")
audio.write(b"\x00\xFF")
audio.close()
time.sleep(beat)
# There is a flaw here, I'll let you big guns find it... ;o)
# Note it is NOT really a bug!
except KeyboardInterrupt: break
main()
# End of the Metronome3x.py code.
# Enjoy finding simple solutions to often very difficult problems...
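# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the original recipe).
# It only shows the timing arithmetic used above: the sleep per tick is the
# beat period 60/bpm minus 0.125 s, which appears to allow for the time spent
# writing the click bytes to /dev/dsp (an assumption about the author's
# intent, not something stated in the recipe).
_bpm = 120
_delay = (60.0 / _bpm) - 0.125   # seconds slept between clicks at 120 bpm -> 0.375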
|
idaes/core/tests/test_process_base.py | carldlaird/idaes-pse | 112 | 12636463 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for process_base.
Author: <NAME>
"""
import pytest
from pyomo.environ import Block, ConcreteModel
from idaes.core.process_base import ProcessBaseBlock
from idaes.core import (FlowsheetBlockData,
declare_process_block_class)
from idaes.core.util.exceptions import ConfigurationError
@declare_process_block_class("Flowsheet")
class _Flowsheet(FlowsheetBlockData):
def build(self):
super(FlowsheetBlockData, self).build()
@pytest.mark.unit
def test_flowsheet():
# Test flowsheet method
m = ConcreteModel()
m.a = Flowsheet()
assert m.a.flowsheet() is None
m.b = Block()
m.b.c = Flowsheet()
assert m.b.c.flowsheet() is None
m.a.d = Flowsheet()
assert m.a.d.flowsheet() is m.a
m.a.e = Flowsheet([1, 2])
assert m.a.e[1].flowsheet() is m.a
m.a.e[1].f = Flowsheet()
assert m.a.e[1].f.flowsheet() is m.a.e[1]
m.a.g = Block()
m.a.g.h = Flowsheet()
assert m.a.g.h.flowsheet() is m.a
m.a.i = Block([1, 2])
m.a.i[1].j = Flowsheet()
assert m.a.i[1].j.flowsheet() is m.a
@pytest.mark.unit
def test_get_performance_contents():
m = ConcreteModel()
m.b = ProcessBaseBlock()
assert m.b._get_performance_contents(time_point=0) is None
@pytest.mark.unit
def test_get_stream_table_contents():
m = ConcreteModel()
m.b = ProcessBaseBlock()
assert m.b._get_stream_table_contents(time_point=0) is None
@pytest.mark.unit
def test_report():
# Test that no exceptions occur
m = ConcreteModel()
m.b = ProcessBaseBlock()
m.b.report(dof=True)
|
test/nn/dense/test_mincut_pool.py | NucciTheBoss/pytorch_geometric | 2,350 | 12636479 | import torch
from torch_geometric.nn import dense_mincut_pool
def test_dense_mincut_pool():
batch_size, num_nodes, channels, num_clusters = (2, 20, 16, 10)
x = torch.randn((batch_size, num_nodes, channels))
adj = torch.ones((batch_size, num_nodes, num_nodes))
s = torch.randn((batch_size, num_nodes, num_clusters))
mask = torch.randint(0, 2, (batch_size, num_nodes), dtype=torch.bool)
x, adj, mincut_loss, ortho_loss = dense_mincut_pool(x, adj, s, mask)
assert x.size() == (2, 10, 16)
assert adj.size() == (2, 10, 10)
assert -1 <= mincut_loss <= 0
assert 0 <= ortho_loss <= 2
|
python/paddle_fl/paddle_fl/core/submitter/client_base.py | barrierye/PaddleFL | 379 | 12636481 | <reponame>barrierye/PaddleFL
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
class CloudClient(object):
def __init__(self):
pass
def generate_submit_sh(self, job_dir):
with open() as fout:
pass
def generate_job_sh(self, job_dir):
with open() as fout:
pass
def submit(self, **kwargs):
pass
class HPCClient(object):
def __init__(self):
self.conf_dict = {}
def print_args(self):
print("task_name: {}".format(self.task_name))
print("hdfs_path: {}".format(self.hdfs_path))
print("ugi: {}".format(self.ugi))
print("hdfs_output: {}".format(self.hdfs_output))
print("worker_nodes: {}".format(self.worker_nodes))
print("server_nodes: {}".format(self.server_nodes))
print("hadoop_home: {}".format(self.hadoop_home))
print("hpc_home: {}".format(self.hpc_home))
print("train_cmd: {}".format(self.train_cmd))
print("package_path: {}".format(self.package_path))
print("priority: {}".format(self.priority))
print("queue: {}".format(self.queue))
print("server: {}".format(self.server))
print("mpi_node_mem: {}".format(self.mpi_node_mem))
print("pcpu: {}".format(self.pcpu))
print("python_tar: {}".format(self.python_tar))
print("wheel: {}".format(self.wheel))
def check_args(self):
assert self.task_name != ""
assert self.hdfs_path != ""
assert self.ugi != ""
assert self.hdfs_output != ""
assert self.worker_nodes != ""
assert self.server_nodes != ""
assert self.hadoop_home != ""
assert self.hpc_home != ""
assert self.train_cmd != ""
assert self.package_path != ""
assert self.priority != ""
assert self.queue != ""
assert self.server != ""
assert self.mpi_node_mem != ""
assert self.pcpu != ""
assert self.python_tar != ""
assert self.wheel != ""
def generate_qsub_conf(self, job_dir):
with open("{}/qsub.conf".format(job_dir), "w") as fout:
fout.write("SERVER={}\n".format(self.server))
fout.write("QUEUE={}\n".format(self.queue))
fout.write("PRIORITY={}\n".format(self.priority))
fout.write("USE_FLAGS_ADVRES=yes\n")
def generate_submit_sh(self, job_dir):
with open("{}/submit.sh".format(job_dir), "w") as fout:
fout.write("#!/bin/bash\n")
fout.write("unset http_proxy\n")
fout.write("unset https_proxy\n")
fout.write("export HADOOP_HOME={}\n".format(self.hadoop_home))
fout.write("$HADOOP_HOME/bin/hadoop fs -Dhadoop.job.ugi={}"
" -Dfs.default.name={} -rmr {}\n".format(
self.ugi, self.hdfs_path, self.hdfs_output))
fout.write("MPI_NODE_MEM={}\n".format(self.mpi_node_mem))
fout.write("{}/bin/qsub_f -N {} --conf qsub.conf "
"--hdfs {} --ugi {} --hout {} --files ./package "
"-l nodes={},walltime=1000:00:00,pmem-hard={},"
"pcpu-soft={},pnetin-soft=1000,"
"pnetout-soft=1000 job.sh\n".format(
self.hpc_home, self.task_name, self.hdfs_path,
self.ugi, self.hdfs_output,
int(self.worker_nodes) + int(self.server_nodes),
self.mpi_node_mem, self.pcpu))
def generate_job_sh(self, job_dir):
with open("{}/job.sh".format(job_dir), "w") as fout:
fout.write("#!/bin/bash\n")
fout.write("WORKDIR=`pwd`\n")
fout.write("mpirun -npernode 1 mv package/* ./\n")
fout.write("echo 'current dir: '$WORKDIR\n")
fout.write(
"mpirun -npernode 1 tar -zxvf python.tar.gz > /dev/null\n")
fout.write(
"export LIBRARY_PATH=$WORKDIR/python/lib:$LIBRARY_PATH\n")
fout.write("mpirun -npernode 1 python/bin/python -m pip install "
"{} --index-url=http://pip.baidu.com/pypi/simple "
"--trusted-host pip.baidu.com > /dev/null\n".format(
self.wheel))
fout.write("export PATH=python/bin:$PATH\n")
if self.monitor_cmd != "":
fout.write(
"mpirun -npernode 1 -timestamp-output -tag-output -machinefile "
"${{PBS_NODEFILE}} python/bin/{} > monitor.log 2> monitor.elog &\n".
format(self.monitor_cmd))
fout.write(
"mpirun -npernode 1 -timestamp-output -tag-output -machinefile ${PBS_NODEFILE} python/bin/python train_program.py\n"
)
fout.write("if [[ $? -ne 0 ]]; then\n")
fout.write(" echo 'Failed to run mpi!' 1>&2\n")
fout.write(" exit 1\n")
fout.write("fi\n")
def submit(self, **kwargs):
# task_name, output_path
self.task_name = kwargs.get("task_name", "test_submit_job")
self.hdfs_path = kwargs.get("hdfs_path", "")
self.ugi = kwargs.get("ugi", "")
self.hdfs_output = kwargs.get("hdfs_output", "")
self.worker_nodes = str(kwargs.get("worker_nodes", 2))
self.server_nodes = str(kwargs.get("server_nodes", 2))
self.hadoop_home = kwargs.get("hadoop_home", "")
self.hpc_home = kwargs.get("hpc_home", "")
self.train_cmd = kwargs.get("train_cmd", "")
self.monitor_cmd = kwargs.get("monitor_cmd", "")
self.package_path = kwargs.get("package_path", "")
self.priority = kwargs.get("priority", "")
self.queue = kwargs.get("queue", "")
self.server = kwargs.get("server", "")
self.mpi_node_mem = str(kwargs.get("mpi_node_mem", 11000))
self.pcpu = str(kwargs.get("pcpu", 180))
self.python_tar = kwargs.get("python_tar", "")
self.wheel = kwargs.get("wheel", "")
self.print_args()
self.check_args()
jobdir = "{}_jobdir".format(self.task_name)
os.system("mkdir -p {}_jobdir".format(self.task_name))
os.system("rm -rf {}/package".format(jobdir))
os.system("cp -r {} {}/package".format(self.package_path, jobdir))
os.system("cp {} {}/package/".format(self.python_tar, jobdir))
os.system("cp {} {}/package/".format(self.wheel, jobdir))
# generate submit.sh
self.generate_submit_sh(jobdir)
# generate job.sh
self.generate_job_sh(jobdir)
# generate qsub.conf
self.generate_qsub_conf(jobdir)
# run submit
os.system("cd {};sh submit.sh > submit.log 2> submit.elog &".format(
jobdir))
|
examples/gpt-j/prepare_partial.py | lipovsek/PyTorch-LIT | 151 | 12636491 | <reponame>lipovsek/PyTorch-LIT<gh_stars>100-1000
from pytorch_lit import prepare_params, PartialLoader
if __name__ == "__main__":
weights = PartialLoader("../../../lab/gpt-j-6B-f16/pytorch_model.bin")
prepare_params(weights, ".models/gpt-j-6b-lit", dtype="float16")
|
Projects/OLED_Weather Station/weather_station.py | benmcclelland/GrovePi | 482 | 12636499 | <reponame>benmcclelland/GrovePi
# Adapted from home_temp_hum_display.py
'''
The MIT License (MIT)
GrovePi for the Raspberry Pi: an open source platform for connecting Grove Sensors to the Raspberry Pi.
Copyright (C) 2017 Dexter Industries
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
from grovepi import *
from grove_oled import *
import threading
dht_sensor_port = 7 # Connect the DHT sensor to port 7
#Start and initialize the OLED
oled_init()
oled_clearDisplay()
oled_setNormalDisplay()
oled_setVerticalMode()
time.sleep(.1)
def get_outside_weather(location='Bucharest,ro'):
import pyowm # Do a 'sudo pip install pyowm' to get this module
owm = pyowm.OWM()
#forecast = owm.daily_forecast(location)
observation = owm.weather_at_place(location)
weather = observation.get_weather()
return weather
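# Illustrative call with a non-default location (the city string below is just
# a placeholder): weather = get_outside_weather('London,uk')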
def update_outside_weather():
# This uses OpenWeatherMap via the PyOWM module;
# the pyowm module needs to be installed via pip,
# see https://github.com/csparpa/pyowm
weather = get_outside_weather()
# by default location is Bucharest,ro; change it to your own
oled_setTextXY(5, 1)
oled_putString("OUTSIDE")
oled_setTextXY(7, 0)
oled_putString("Temp:")
oled_putString(str(weather.get_temperature("celsius")['temp']) + "C")
oled_setTextXY(8, 0)
oled_putString("Hum :")
oled_putString(str(weather.get_humidity()) + "%")
oled_setTextXY(9, 0)
oled_putString("Rain:")
rain = weather.get_rain()
if len(rain) > 0:
# pyowm returns a dict such as {'3h': 0.25}; show the first reported volume
oled_putString(str(list(rain.values())[0]))
else:
oled_putString("0%")
print(("Weather: ", weather.get_temperature("celsius")))
print(("Humidity: ", weather.get_humidity()))
while True:
try:
# Get the temperature and Humidity from the DHT sensor
[temp, hum] = dht(dht_sensor_port, 1)
print(("Temp =", temp, "C\tHumidity =", hum, "%"))
t = str(temp)
h = str(hum)
#outside_thread = threading.Thread(target=update_outside_weather)
#outside_thread.start()
oled_setTextXY(0, 1) # Print "INSIDE" at line 1
oled_putString("INSIDE")
oled_setTextXY(2, 0) # Print "TEMP" and the temperature in line 3
oled_putString("Temp:")
oled_putString(t + "C")
oled_setTextXY(3, 0) # Print "HUM :" and the humidity in line 4
oled_putString("Hum :")
oled_putString(h + "%")
#outside_thread.join()
update_outside_weather()
except (IOError, TypeError, Exception) as e:
print(("Error:" + str(e)))
finally:
#outside_thread.join()
pass
|
tests/prune_tests.py | dantaki/svtools | 120 | 12636534 | from unittest import TestCase, main
import os
import sys
import tempfile
import difflib
import svtools.prune
class IntegrationTestPrune(TestCase):
def run_integration_test(self):
test_directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(test_directory, 'test_data', 'prune')
input = os.path.join(test_data_dir, 'input.no_missing.bed')
expected_result = os.path.join(test_data_dir, 'expected.no_missing.bed')
temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.bed')
with open(input) as input_handle, os.fdopen(temp_descriptor, 'w') as output_handle:
pruner = svtools.prune.Pruner(50, None)
pruner.cluster_bedpe(input_handle, output_handle, False)
expected_lines = open(expected_result).readlines()
produced_lines = open(temp_output_path).readlines()
diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
result = ''.join(diff)
if result != '':
for line in result:
sys.stdout.write(line)
self.assertFalse(result)
os.remove(temp_output_path)
def run_sname_integration_test(self):
test_directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(test_directory, 'test_data', 'prune')
input = os.path.join(test_data_dir, 'input.sname_merge.bedpe')
expected_result = os.path.join(test_data_dir, 'expected.sname_merge.bedpe')
temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.bed')
with open(input) as input_handle, os.fdopen(temp_descriptor, 'w') as output_handle:
pruner = svtools.prune.Pruner(50, None)
pruner.cluster_bedpe(input_handle, output_handle, False)
expected_lines = open(expected_result).readlines()
produced_lines = open(temp_output_path).readlines()
diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
result = ''.join(diff)
if result != '':
for line in result:
sys.stdout.write(line)
self.assertFalse(result)
os.remove(temp_output_path)
def run_sname_multiprune_integration_test(self):
test_directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(test_directory, 'test_data', 'prune')
input = os.path.join(test_data_dir, 'input.multi_prune.bedpe')
expected_result = os.path.join(test_data_dir, 'expected.multi_prune.bedpe')
temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.bed')
with open(input) as input_handle, os.fdopen(temp_descriptor, 'w') as output_handle:
pruner = svtools.prune.Pruner(50, None)
pruner.cluster_bedpe(input_handle, output_handle, False)
expected_lines = open(expected_result).readlines()
produced_lines = open(temp_output_path).readlines()
diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
result = ''.join(diff)
if result != '':
for line in result:
sys.stdout.write(line)
self.assertFalse(result)
os.remove(temp_output_path)
def run_sname_duplicate_output_integration_test(self):
test_directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(test_directory, 'test_data', 'prune')
input = os.path.join(test_data_dir, 'input.dup_lines.bed')
expected_result = os.path.join(test_data_dir, 'expected.dup_lines.bed')
temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.bed')
with open(input) as input_handle, os.fdopen(temp_descriptor, 'w') as output_handle:
pruner = svtools.prune.Pruner(75, None)
pruner.cluster_bedpe(input_handle, output_handle, False)
expected_lines = open(expected_result).readlines()
produced_lines = open(temp_output_path).readlines()
diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
result = ''.join(diff)
if result != '':
for line in result:
sys.stdout.write(line)
self.assertFalse(result)
os.remove(temp_output_path)
if __name__ == "__main__":
main()
|
goless/compat.py | timgates42/goless | 266 | 12636554 | import sys
PY3 = sys.version_info[0] == 3
if PY3:
# noinspection PyShadowingBuiltins
range = range
maxint = sys.maxsize
# noinspection PyUnusedLocal
def reraise(e, v, origex):
raise e(v).with_traceback(origex.__traceback__)
else:
# noinspection PyShadowingBuiltins
range = xrange
maxint = sys.maxint
exec("""def reraise(e, v, origex):
tb = sys.exc_info()[2]
raise e, v, tb""")
|
autox/autoxserver.py | fanghy06/AutoX | 499 | 12636593 | <reponame>fanghy06/AutoX<gh_stars>100-1000
from autox.autox_server.ensemble import ensemble
from autox.autox_server.feature_engineer import fe_count, fe_onehot, fe_shift, fe_time_diff
from autox.autox_server.feature_engineer import fe_kv, fe_stat_for_same_prefix, fe_frequency
from autox.autox_server.feature_engineer import fe_time_count, fe_window_count, fe_time_rolling_count
from autox.autox_server.feature_engineer import fe_window2, fe_txt
from autox.autox_server.join_table import join_table
from autox.autox_server.model import lgb_with_fe, lgb_for_feature_selection
from autox.autox_server.model import model_util
from autox.autox_server.pre_process import process_1, process_2, process_3
from autox.autox_server.read_data import read_data
from autox.autox_server.util import log, load_obj
from autox.autox_server.util import merge_table, save_obj
class AutoXServer():
def __init__(self, is_train, server_name, data_info_path=None, train_set_path=None):
if is_train:
assert(data_info_path is not None and train_set_path is not None)
else:
assert (data_info_path is None and train_set_path is None)
self.is_train = is_train
self.data_info_path = data_info_path
self.train_set_path = train_set_path
self.server_name = server_name
def fit(self):
data_name = self.server_name
log("data name: {}".format(data_name))
lgb_para_dict_1 = model_util.lgb_para_dict_1
lgb_para_dict_2 = model_util.lgb_para_dict_2
params_1 = model_util.params_1
params_2 = model_util.params_2
self.G_hist = {}
self.G_hist['val_auc'] = {}
self.G_hist['predict'] = {}
self.G_hist['delete_column'] = {}
phase = 'train'
log("*** phase: {}".format(phase))
is_train = True if phase == 'train' else False
self.G_df_dict, self.G_data_info, remain_time = read_data.read_data(data_info_path=self.data_info_path,
train_set_path=self.train_set_path, is_train=is_train, debug=False)
remain_time = process_1.preprocess(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = join_table.join_simple_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = process_2.preprocess_2(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = join_table.join_indirect_1_to_M_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
remain_time = join_table.preprocess_after_join_indirect_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
remain_time = join_table.join_1_to_M_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
remain_time = process_3.preprocess_3(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = fe_kv.fe_kv(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_stat_for_same_prefix.fe_stat_for_same_prefix(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_frequency.fe_frequency(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_count.fe_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_shift.fe_shift(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_time_diff.fe_time_diff(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_time_count.fe_time_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_window_count.fe_window_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_time_rolling_count.fe_time_rolling_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_window2.fe_window2(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_onehot.fe_onehot(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = fe_txt.fe_txt(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = merge_table(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
exp_name = 'feature_selection'
remain_time = lgb_for_feature_selection.lgb_for_feature_selection(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_1, lgb_para_dict_1, data_name, exp_name)
exp_name_1 = 'fe_lgb'
remain_time = lgb_with_fe.lgb_with_fe(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_1, lgb_para_dict_1, data_name, exp_name_1)
exp_name_2 = 'fe_lgb_2'
remain_time = lgb_with_fe.lgb_with_fe(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_2, lgb_para_dict_2, data_name, exp_name_2)
_ = ensemble.ensemble(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, top_k=2)
def predict(self, df=None, test_set_path=None):
assert ((df is None and test_set_path is not None) or (df is not None and test_set_path is None))
data_name = self.server_name
lgb_para_dict_1 = model_util.lgb_para_dict_1
lgb_para_dict_2 = model_util.lgb_para_dict_2
params_1 = model_util.params_1
params_2 = model_util.params_2
phase = 'test'
log("*** phase: {}".format(phase))
remain_time = 1e10
is_train = True if phase == 'train' else False
self.G_df_dict, self.G_data_info, remain_time = read_data.read_data(data_info=self.G_data_info, test_set_path=test_set_path, df_dict=self.G_df_dict,
is_train=is_train, debug=False, remain_time=remain_time)
remain_time = process_1.preprocess(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = join_table.join_simple_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = process_2.preprocess_2(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = join_table.join_indirect_1_to_M_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
remain_time = join_table.preprocess_after_join_indirect_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
remain_time = join_table.join_1_to_M_tables(self.G_df_dict, self.G_data_info, self.G_hist, is_train=is_train, remain_time=remain_time)
remain_time = process_3.preprocess_3(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = fe_kv.fe_kv(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_stat_for_same_prefix.fe_stat_for_same_prefix(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_frequency.fe_frequency(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_count.fe_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_shift.fe_shift(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_time_diff.fe_time_diff(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_time_count.fe_time_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_window_count.fe_window_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_time_rolling_count.fe_time_rolling_count(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_window2.fe_window2(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, False)
remain_time = fe_onehot.fe_onehot(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = fe_txt.fe_txt(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
remain_time = merge_table(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time)
exp_name = 'feature_selection'
remain_time = lgb_for_feature_selection.lgb_for_feature_selection(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_1, lgb_para_dict_1, data_name, exp_name)
exp_name_1 = 'fe_lgb'
remain_time = lgb_with_fe.lgb_with_fe(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_1, lgb_para_dict_1, data_name, exp_name_1)
exp_name_2 = 'fe_lgb_2'
remain_time = lgb_with_fe.lgb_with_fe(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, params_2, lgb_para_dict_2, data_name, exp_name_2)
_ = ensemble.ensemble(self.G_df_dict, self.G_data_info, self.G_hist, is_train, remain_time, top_k=2)
sub = self.G_hist['predict']['ensemble']
sub.index = range(len(sub))
return sub
def save_server(self, path):
data_name = self.server_name
save_obj(self.G_df_dict, path + f'/{data_name}_G_df_dict.pkl')
save_obj(self.G_data_info, path + f'/{data_name}_G_data_info.pkl')
save_obj(self.G_hist, path + f'/{data_name}_G_hist.pkl')
def load_server(self, path):
data_name = self.server_name
self.G_df_dict = load_obj(path + f'/{data_name}_G_df_dict.pkl')
self.G_data_info = load_obj(path + f'/{data_name}_G_data_info.pkl')
self.G_hist = load_obj(path + f'/{data_name}_G_hist.pkl')
|
dreamer/networks/proprio.py | jsikyoon/dreamer-1 | 546 | 12636598 | <filename>dreamer/networks/proprio.py
# Copyright 2019 The Dreamer Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from dreamer import tools
def encoder(obs, keys=None, num_layers=3, units=300, activation=tf.nn.relu):
if not keys:
keys = [key for key in obs.keys() if key != 'image']
inputs = tf.concat([obs[key] for key in keys], -1)
hidden = tf.reshape(inputs, [-1] + inputs.shape[2:].as_list())
for _ in range(num_layers):
hidden = tf.layers.dense(hidden, units, activation)
hidden = tf.reshape(hidden, tools.shape(inputs)[:2] + [
hidden.shape[1].value])
return hidden
|
fastapi_amis_admin/amis_admin/site.py | amisadmin/fastapi_amis_admin | 166 | 12636631 | <gh_stars>100-1000
"""This package is for compatibility with v0.1.8 and below,v0.3.0 and above will be removed. """
from fastapi_amis_admin.admin.site import (
AdminSite,
DocsAdmin,
FileAdmin,
HomeAdmin,
ReDocsAdmin,
) |
scripts/internal/print_downloads.py | odormond/psutil | 8,285 | 12636659 | #!/usr/bin/env python3
# Copyright (c) 2009 <NAME>'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Print PYPI statistics in MarkDown format.
Useful sites:
* https://pepy.tech/project/psutil
* https://pypistats.org/packages/psutil
* https://hugovk.github.io/top-pypi-packages/
"""
from __future__ import print_function
import json
import os
import subprocess
import sys
import pypinfo # NOQA
from psutil._common import memoize
AUTH_FILE = os.path.expanduser("~/.pypinfo.json")
PKGNAME = 'psutil'
DAYS = 30
LIMIT = 100
GITHUB_SCRIPT_URL = "https://github.com/giampaolo/psutil/blob/master/" \
"scripts/internal/pypistats.py"
LAST_UPDATE = None
bytes_billed = 0
# --- get
@memoize
def sh(cmd):
assert os.path.exists(AUTH_FILE)
env = os.environ.copy()
env['GOOGLE_APPLICATION_CREDENTIALS'] = AUTH_FILE
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, universal_newlines=True)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise RuntimeError(stderr)
assert not stderr, stderr
return stdout.strip()
@memoize
def query(cmd):
global bytes_billed
ret = json.loads(sh(cmd))
bytes_billed += ret['query']['bytes_billed']
return ret
def top_packages():
global LAST_UPDATE
ret = query("pypinfo --all --json --days %s --limit %s '' project" % (
DAYS, LIMIT))
LAST_UPDATE = ret['last_update']
return [(x['project'], x['download_count']) for x in ret['rows']]
def ranking():
data = top_packages()
i = 1
for name, downloads in data:
if name == PKGNAME:
return i
i += 1
raise ValueError("can't find %s" % PKGNAME)
def downloads():
data = top_packages()
for name, downloads in data:
if name == PKGNAME:
return downloads
raise ValueError("can't find %s" % PKGNAME)
def downloads_pyver():
return query("pypinfo --json --days %s %s pyversion" % (DAYS, PKGNAME))
def downloads_by_country():
return query("pypinfo --json --days %s %s country" % (DAYS, PKGNAME))
def downloads_by_system():
return query("pypinfo --json --days %s %s system" % (DAYS, PKGNAME))
def downloads_by_distro():
return query("pypinfo --json --days %s %s distro" % (DAYS, PKGNAME))
# --- print
templ = "| %-30s | %15s |"
def print_row(left, right):
if isinstance(right, int):
right = '{0:,}'.format(right)
print(templ % (left, right))
def print_header(left, right="Downloads"):
print_row(left, right)
s = templ % ("-" * 30, "-" * 15)
print("|:" + s[2:-2] + ":|")
def print_markdown_table(title, left, rows):
pleft = left.replace('_', ' ').capitalize()
print("### " + title)
print()
print_header(pleft)
for row in rows:
lval = row[left]
print_row(lval, row['download_count'])
print()
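# Illustrative output sketch (the numbers are placeholders, not real download
# stats); each section emitted by print_markdown_table() looks roughly like:
#
#   ### Overview
#
#   | What       |   Downloads |
#   |:---------- | -----------:|
#   | Per month  |   1,000,000 |
#   | Per day    |      33,333 |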
def main():
downs = downloads()
print("# Download stats")
print("")
s = "psutil download statistics of the last %s days (last update " % DAYS
s += "*%s*).\n" % LAST_UPDATE
s += "Generated via [pypistats.py](%s) script.\n" % GITHUB_SCRIPT_URL
print(s)
data = [
{'what': 'Per month', 'download_count': downs},
{'what': 'Per day', 'download_count': int(downs / 30)},
{'what': 'PYPI ranking', 'download_count': ranking()}
]
print_markdown_table('Overview', 'what', data)
print_markdown_table('Operating systems', 'system_name',
downloads_by_system()['rows'])
print_markdown_table('Distros', 'distro_name',
downloads_by_distro()['rows'])
print_markdown_table('Python versions', 'python_version',
downloads_pyver()['rows'])
print_markdown_table('Countries', 'country',
downloads_by_country()['rows'])
if __name__ == '__main__':
try:
main()
finally:
print("bytes billed: %s" % bytes_billed, file=sys.stderr)
|
ykdl/extractors/douyin/__init__.py | netlovehf/ykdl | 1,153 | 12636669 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.util.html import get_location, add_header
def get_extractor(url):
if '/v.' in url:
add_header('User-Agent',
'Mozilla/5.0 (iPhone; CPU iPhone OS 13_2_3 like Mac OS X) '
'AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 '
'Mobile/15E148 Safari/604.1')
url = get_location(url)
if '/live.' in url or 'amemv.com' in url:
from . import live as s
else:
from . import video as s
return s.site, url
|
examples/tucker/make_annotation.py | kvetab/bio_embeddings | 219 | 12636677 | <reponame>kvetab/bio_embeddings<filename>examples/tucker/make_annotation.py<gh_stars>100-1000
"""This script was used to generate tucker_annotations.csv. You shouldn't need to use it"""
from pathlib import Path
import pandas
def class_to_label(cath_class: int) -> str:
"""See http://www.cathdb.info/browse/tree"""
mapping = {
1: "Mainly Alpha",
2: "Mainly Beta",
3: "Alpha Beta",
4: "Few secondary structures",
6: "Special",
}
return mapping[cath_class]
def main():
# Download this file from
# http://download.cathdb.info/cath/releases/all-releases/v4_3_0/cath-classification-data/cath-domain-list-v4_3_0.txt
mapping_df = pandas.read_fwf(
"cath-domain-list-v4_3_0.txt",
comment="#",
colspecs=[(0, 7), (7, 13), (13, 19), (19, 25), (25, 31)],
usecols=[0, 1, 2, 3, 4],
names=["domain", "C", "A", "T", "H"],
)
fasta_file = "tucker_cath.fasta"
ids = [i[1:] for i in Path(fasta_file).read_text().splitlines()[::2]]
mapping = {
domain: class_to_label(cath_class)
for domain, cath_class in mapping_df[["domain", "C"]].itertuples(index=False)
}
records = [(i, mapping[i]) for i in ids]
label_df = pandas.DataFrame.from_records(records, columns=["identifier", "label"])
label_df.to_csv("cath_annotations_class.csv", index=False)
if __name__ == "__main__":
main()
|
nova/console/securityproxy/base.py | zjzh/nova | 1,874 | 12636692 | <gh_stars>1000+
# Copyright (c) 2014-2016 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
class SecurityProxy(metaclass=abc.ABCMeta):
"""A console security Proxy Helper
Console security proxy helpers should subclass
this class and implement a generic `connect`
for the particular protocol being used.
Security drivers can then subclass the
protocol-specific helper class.
"""
@abc.abstractmethod
def connect(self, tenant_sock, compute_sock):
"""Initiate the console connection
This method performs the protocol specific
negotiation, and returns the socket-like
object to use to communicate with the server
securely.
:param tenant_sock: socket connected to the remote tenant user
:param compute_sock: socket connected to the compute node instance
:returns: a new compute_sock for the instance
"""
pass
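# A minimal illustrative sketch (not part of Nova): the class name below is
# made up, and it performs no protocol negotiation at all; it simply hands
# back the compute-side socket unchanged. Real drivers negotiate the console
# protocol (e.g. RFB authentication) before returning the socket to use.
class PassthroughSecurityProxy(SecurityProxy):
    def connect(self, tenant_sock, compute_sock):
        # No protocol-specific handshake is performed here.
        return compute_sock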
|
ceilometer/hardware/inspector/__init__.py | maestro-hybrid-cloud/ceilometer | 239 | 12636706 | #
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from stevedore import driver
def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'):
"""Get inspector driver and load it.
:param parsed_url: urlparse.SplitResult object for the inspector
:param namespace: Namespace to use to look for drivers.
"""
loaded_driver = driver.DriverManager(namespace, parsed_url.scheme)
return loaded_driver.driver()
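# Illustrative usage sketch (not part of the original module). The 'snmp'
# scheme is an assumption; it only resolves if an inspector with that name is
# registered under the 'ceilometer.hardware.inspectors' namespace:
#
#     from urllib.parse import urlsplit
#     inspector = get_inspector(urlsplit('snmp://localhost'))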
|
examples/subgraph_matching/train_single_proc.py | ruth-ann/deepsnap | 412 | 12636737 | <reponame>ruth-ann/deepsnap
"""Train the order embedding model"""
import argparse
from collections import defaultdict
from itertools import permutations
import pickle
from queue import PriorityQueue
import os
import random
import time
from deepsnap.batch import Batch
import networkx as nx
import numpy as np
from sklearn.manifold import TSNE
from sklearn.metrics import roc_auc_score, confusion_matrix
from sklearn.metrics import precision_recall_curve
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torch_geometric.data import DataLoader
from torch_geometric.datasets import TUDataset
import torch_geometric.utils as pyg_utils
import torch_geometric.nn as pyg_nn
import data
#import data_random_basis as data
import models
import utils
#import matplotlib.pyplot as plt
#import matplotlib.colors as mcolors
def arg_parse():
parser = argparse.ArgumentParser(description='GNN arguments.')
parser.add_argument('--conv_type', type=str,
help='type of model')
parser.add_argument('--method_type', type=str,
help='type of convolution')
parser.add_argument('--batch_size', type=int,
help='Training batch size')
parser.add_argument('--n_layers', type=int,
help='Number of graph conv layers')
parser.add_argument('--hidden_dim', type=int,
help='Training hidden size')
parser.add_argument('--max_graph_size', type=int,
help='max training graph size')
parser.add_argument('--n_batches', type=int,
help='Number of training minibatches')
parser.add_argument('--margin', type=float,
help='margin for loss')
parser.add_argument('--dataset', type=str,
help='Dataset')
parser.add_argument('--dataset_type', type=str,
help='"otf-syn" or "syn" or "real"')
parser.add_argument('--eval_interval', type=int,
help='how often to eval during training')
parser.add_argument('--val_size', type=int,
help='validation set size')
parser.add_argument('--model_path', type=str,
help='path to save/load model')
parser.add_argument('--start_weights', type=str,
help='file to load weights from')
parser.add_argument('--test', action="store_true")
parser.add_argument('--n_workers', type=int)
parser.set_defaults(conv_type='SAGE',
method_type='order',
dataset='enzymes',
dataset_type='real',
n_layers=4,
batch_size=64,
hidden_dim=64,
dropout=0.0,
n_batches=1000000,
lr=1e-4,
margin=0.1,
test_set='',
eval_interval=100,
n_workers=4,
model_path="ckpt/model.pt",
start_weights='',
max_graph_size=20,
val_size=1024)
return parser.parse_args()
def build_model(args):
# build model
# set the input dimension to be the dimension of node labels of the dataset
if args.dataset == "enzymes":
dim = 3
elif args.dataset == "cox2":
dim = 35
elif args.dataset == "imdb-binary":
dim = 1
model = models.BaselineMLP(dim, args.hidden_dim, args)
model.to(utils.get_device())
if args.start_weights:
model.load_state_dict(torch.load(args.start_weights,
map_location=utils.get_device()))
return model
def train_epoch(args, model, data_source, opt):
"""Train the order embedding model.
args: Commandline arguments
"""
# data_source = data.DataSource(dataset_name)
batch_num = 0
#for batch_num in range(args.n_batches):
loaders = data_source.gen_data_loaders(args.batch_size, train=True)
for batch_target, batch_neg_target, batch_neg_query in zip(*loaders):
# train
model.train()
model.zero_grad()
pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
batch_neg_target, batch_neg_query, True)
pos_a = pos_a.to(utils.get_device())
pos_b = pos_b.to(utils.get_device())
neg_a = neg_a.to(utils.get_device())
neg_b = neg_b.to(utils.get_device())
emb_pos_a, emb_pos_b = model.emb_model(pos_a), model.emb_model(pos_b)
emb_neg_a, emb_neg_b = model.emb_model(neg_a), model.emb_model(neg_b)
emb_as = torch.cat((emb_pos_a, emb_neg_a), dim=0)
emb_bs = torch.cat((emb_pos_b, emb_neg_b), dim=0)
labels = torch.tensor([1]*pos_a.num_graphs + [0]*neg_a.num_graphs).to(
utils.get_device())
pred = model(emb_as, emb_bs)
loss = model.criterion(pred, labels)
loss.backward()
if not args.test:
opt.step()
pred = model.predict(pred)
train_acc = torch.mean((pred == labels).type(torch.float))
train_loss = loss.item()
print("Batch {}. Loss: {:.4f}. Training acc: {:.4f}".format(
batch_num, train_loss, train_acc), end=" \r")
batch_num += 1
#logger.add_scalar("Loss/train", train_loss, 0)
#logger.add_scalar("Accuracy/train", train_acc, 0)
def validation(args, model, data_source, logger, batch_n):
# test on new motifs
model.eval()
all_raw_preds, all_preds, all_labels = [], [], []
loaders = data_source.gen_data_loaders(args.batch_size, train=False)
for batch_target, batch_neg_target, batch_neg_query in zip(*loaders):
pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(batch_target,
batch_neg_target, batch_neg_query, False)
pos_a = pos_a.to(utils.get_device())
pos_b = pos_b.to(utils.get_device())
neg_a = neg_a.to(utils.get_device())
neg_b = neg_b.to(utils.get_device())
with torch.no_grad():
if args.dataset_type in ["real", "otf-syn"]:
emb_pos_a, emb_pos_b = (model.emb_model(pos_a),
model.emb_model(pos_b))
emb_neg_a, emb_neg_b = (model.emb_model(neg_a),
model.emb_model(neg_b))
emb_as = torch.cat((emb_pos_a, emb_neg_a), dim=0)
emb_bs = torch.cat((emb_pos_b, emb_neg_b), dim=0)
labels = torch.tensor([1]*pos_a.num_graphs +
[0]*neg_a.num_graphs).to(utils.get_device())
raw_pred = model(emb_as, emb_bs)
pred = model.predict(raw_pred)
raw_pred = raw_pred[:,1]
all_raw_preds.append(raw_pred)
all_preds.append(pred)
all_labels.append(labels)
pred = torch.cat(all_preds, dim=-1)
labels = torch.cat(all_labels, dim=-1)
raw_pred = torch.cat(all_raw_preds, dim=-1)
acc = torch.mean((pred == labels).type(torch.float))
prec = (torch.sum(pred * labels).item() / torch.sum(pred).item() if
torch.sum(pred) > 0 else float("NaN"))
recall = (torch.sum(pred * labels).item() /
torch.sum(labels).item() if torch.sum(labels) > 0 else
float("NaN"))
labels = labels.detach().cpu().numpy()
raw_pred = raw_pred.detach().cpu().numpy()
pred = pred.detach().cpu().numpy()
auroc = roc_auc_score(labels, raw_pred)
tn, fp, fn, tp = confusion_matrix(labels, pred).ravel()
print("\nValidation. Acc: {:.4f}. "
"P: {:.4f}. R: {:.4f}. AUROC: {:.4f}\n "
"TN: {}. FP: {}. FN: {}. TP: {}".format(
acc, prec, recall, auroc,
tn, fp, fn, tp))
logger.add_scalar("Accuracy/test", acc, batch_n)
logger.add_scalar("Precision/test", prec, batch_n)
logger.add_scalar("Recall/test", recall, batch_n)
logger.add_scalar("AUROC/test", auroc, batch_n)
logger.add_scalar("TP/test", tp, batch_n)
logger.add_scalar("TN/test", tn, batch_n)
logger.add_scalar("FP/test", fp, batch_n)
logger.add_scalar("FN/test", fn, batch_n)
def main():
args = arg_parse()
# see test-tube
#args = hyp_search.hyp_arg_parse()
if not os.path.exists(os.path.dirname(args.model_path)):
os.makedirs(os.path.dirname(args.model_path))
print("Starting {} workers".format(args.n_workers))
print("Using dataset {}".format(args.dataset))
record_keys = ["conv_type", "n_layers", "hidden_dim",
"margin", "dataset", "dataset_type", "max_graph_size", "skip"]
args_str = ".".join(["{}={}".format(k, v)
for k, v in sorted(vars(args).items()) if k in record_keys])
logger = SummaryWriter("log/" + args_str)
model = build_model(args)
data_source = data.DataSource(args.dataset)
opt = optim.Adam(model.parameters(), args.lr)
if args.test:
validation(args, model, data_source, logger, 0)
else:
batch_n = 0
for epoch in range(args.n_batches // args.eval_interval):
print("Epoch", epoch)
train_epoch(args, model, data_source, opt)
validation(args, model, data_source, logger, batch_n)
if not args.test:
print("Saving {}".format(args.model_path))
torch.save(model.state_dict(), args.model_path)
if __name__ == '__main__':
main()
|
tools/dom/new_scripts/compiler.py | omerlevran46/sdk | 8,969 | 12636748 | #!/usr/bin/env python3
# Copyright (C) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Compile an .idl file to Dart bindings (.h and .cpp files).
Design doc: ??????
"""
from optparse import OptionParser
import os
import sys
dart_script_path = os.path.dirname(os.path.abspath(__file__))
script_path = os.path.join(
os.path.dirname(os.path.dirname(dart_script_path)), 'scripts')
sys.path.extend([script_path])
from dart_compiler import IdlCompiler
from code_generator_dart import CodeGeneratorDart
def parse_options():
parser = OptionParser()
parser.add_option('--output-directory')
parser.add_option('--interfaces-info-file')
parser.add_option('--write-file-only-if-changed', type='int', default='1')
parser.add_option('--generate-global', type='int')
# ensure output comes last, so command line easy to parse via regexes
parser.disable_interspersed_args()
options, args = parser.parse_args()
if options.output_directory is None:
parser.error('Must specify output directory using --output-directory.')
options.write_file_only_if_changed = bool(
options.write_file_only_if_changed)
options.generate_global = bool(options.generate_global)
if len(args) != 1:
# parser.error('Must specify exactly 1 input file as argument, but %d given.' % len(args))
return options, None
idl_filename = os.path.realpath(args[0])
return options, idl_filename
def idl_filename_to_interface_name(idl_filename):
basename = os.path.basename(idl_filename)
interface_name, _ = os.path.splitext(basename)
return interface_name
class IdlCompilerDart(IdlCompiler):
def __init__(self, *args, **kwargs):
IdlCompiler.__init__(self, *args, **kwargs)
interfaces_info = self.interfaces_info
self.output_directory = self.output_directory
self.code_generator = CodeGeneratorDart(interfaces_info,
self.output_directory)
def compile_file(self, idl_filename):
interface_name = idl_filename_to_interface_name(idl_filename)
header_filename = os.path.join(self.output_directory,
'Dart%s.h' % interface_name)
cpp_filename = os.path.join(self.output_directory,
'Dart%s.cpp' % interface_name)
return self.compile_and_write(idl_filename,
(header_filename, cpp_filename))
def generate_global(self):
global_header_filename = os.path.join(self.output_directory,
'DartWebkitClassIds.h')
global_cpp_filename = os.path.join(self.output_directory,
'DartWebkitClassIds.cpp')
self.generate_global_and_write((global_header_filename,
global_cpp_filename))
def main():
options, idl_filename = parse_options()
if options.generate_global:
idl_compiler = IdlCompilerDart(
options.output_directory,
interfaces_info_filename=options.interfaces_info_file,
only_if_changed=options.write_file_only_if_changed)
idl_compiler.generate_global()
else:
idl_compiler = IdlCompilerDart(
options.output_directory,
interfaces_info_filename=options.interfaces_info_file,
only_if_changed=options.write_file_only_if_changed)
idl_compiler.compile_file(idl_filename)
if __name__ == '__main__':
sys.exit(main())
|
test/unit_tests/braket/circuits/test_result_type.py | orclassiq/amazon-braket-sdk-python | 151 | 12636757 | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import pytest
from braket.circuits import Observable, ObservableResultType, ResultType
@pytest.fixture
def result_type():
return ResultType(ascii_symbols=["foo"])
@pytest.fixture
def prob():
return ResultType.Probability([0, 1])
@pytest.fixture
def sv():
return ResultType.StateVector()
@pytest.mark.xfail(raises=ValueError)
def test_none_ascii():
ResultType(ascii_symbols=None)
def test_name(result_type):
expected = result_type.__class__.__name__
assert result_type.name == expected
def test_ascii_symbol():
ascii_symbols = ["foo"]
result_type = ResultType(ascii_symbols=ascii_symbols)
assert result_type.ascii_symbols == ascii_symbols
def test_equality_statevector():
result1 = ResultType.StateVector()
result2 = ResultType.StateVector()
result3 = ResultType.Probability([1])
result4 = "hi"
assert result1 == result2
assert result1 != result3
assert result1 != result4
def test_equality_densitymatrix():
result1 = ResultType.DensityMatrix()
result2 = ResultType.DensityMatrix()
result3 = ResultType.StateVector()
result4 = "foo"
assert result1 == result2
assert result1 != result3
assert result1 != result4
@pytest.mark.xfail(raises=AttributeError)
def test_ascii_symbol_setter(result_type):
result_type.ascii_symbols = ["bar"]
@pytest.mark.xfail(raises=AttributeError)
def test_name_setter(result_type):
result_type.name = "hi"
@pytest.mark.xfail(raises=NotImplementedError)
def test_to_ir_not_implemented_by_default(result_type):
result_type.to_ir(None)
def test_register_result():
class _FooResultType(ResultType):
def __init__(self):
super().__init__(ascii_symbols=["foo"])
ResultType.register_result_type(_FooResultType)
assert ResultType._FooResultType().name == _FooResultType().name
def test_copy_creates_new_object(prob):
copy = prob.copy()
assert copy == prob
assert copy is not prob
def test_copy_with_mapping_target(sv):
target_mapping = {0: 10, 1: 11}
expected = ResultType.StateVector()
assert sv.copy(target_mapping=target_mapping) == expected
def test_copy_with_mapping_target_hasattr(prob):
target_mapping = {0: 10, 1: 11}
expected = ResultType.Probability([10, 11])
assert prob.copy(target_mapping=target_mapping) == expected
def test_copy_with_target_hasattr(prob):
target = [10, 11]
expected = ResultType.Probability(target)
assert prob.copy(target=target) == expected
def test_copy_with_target(sv):
target = [10, 11]
expected = ResultType.StateVector()
assert sv.copy(target=target) == expected
@pytest.mark.xfail(raises=TypeError)
def test_copy_with_target_and_mapping(prob):
prob.copy(target=[10], target_mapping={0: 10})
# ObservableResultType
@pytest.mark.xfail(raises=ValueError)
def test_expectation_init_value_error_target():
ObservableResultType(
ascii_symbols=["Obs", "Obs"], observable=Observable.X() @ Observable.Y(), target=[]
)
@pytest.mark.xfail(raises=ValueError)
def test_expectation_init_value_error_ascii_symbols():
ObservableResultType(
ascii_symbols=["Obs"], observable=Observable.X() @ Observable.Y(), target=[1, 2]
)
@pytest.mark.xfail(raises=ValueError)
def test_obs_rt_init_value_error_qubit_count():
ObservableResultType(ascii_symbols=["Obs"], observable=Observable.X(), target=[0, 1])
def test_obs_rt_equality():
a1 = ObservableResultType(ascii_symbols=["Obs"], observable=Observable.X(), target=0)
a2 = ObservableResultType(ascii_symbols=["Obs"], observable=Observable.X(), target=0)
a3 = ObservableResultType(ascii_symbols=["Obs"], observable=Observable.X(), target=1)
a4 = "hi"
assert a1 == a2
assert a1 != a3
assert a1 != a4
assert ResultType.Variance(observable=Observable.Y(), target=0) != ResultType.Expectation(
observable=Observable.Y(), target=0
)
def test_obs_rt_repr():
a1 = ObservableResultType(ascii_symbols=["Obs"], observable=Observable.X(), target=0)
assert (
str(a1)
== "ObservableResultType(observable=X('qubit_count': 1), target=QubitSet([Qubit(0)]))"
)
|
hiitpi/__init__.py | jingw222/hiitpi | 146 | 12636814 | import sys
import logging
import datetime
import random
import cv2
import pandas as pd
from flask import Response, request, redirect, session
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
from flask_caching import Cache
from flask_session import Session
import plotly.express as px
import dash
from dash.dependencies import Input, Output, State
logging.basicConfig(
stream=sys.stdout,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
datefmt=" %I:%M:%S ",
level="INFO",
)
logger = logging.getLogger(__name__)
COLORS = {"graph_bg": "#1E1E1E", "text": "#696969"}
sess = Session()
cache = Cache()
db = SQLAlchemy()
migrate = Migrate()
def create_app(config_name):
"""Create a Dash app."""
from .config import config
from .model import WorkoutSession
from .pose import PoseEngine
from .camera import VideoStream
from .redisclient import RedisClient
from .workout import WORKOUTS
from .annotation import Annotator
from .layout import layout_homepage, layout_login, layout
app = dash.Dash(
__name__,
meta_tags=[
{"name": "charset", "content": "UTF-8"},
{
"name": "viewport",
"content": "width=device-width, initial-scale=1, maximum-scale=1, shrink-to-fit=no",
},
{"name": "author", "content": "<NAME>"},
{
"name": "description",
"content": "A HIIT Workout Trainer Dash App on Your Raspberry Pi",
},
],
)
app.title = "HIIT PI"
app.config.suppress_callback_exceptions = True
app.layout = layout()
server = app.server
server.config.from_object(config[config_name])
with server.app_context():
db.init_app(server)
migrate.init_app(server, db)
sess.init_app(server)
cache.init_app(server)
cache.clear()
video = VideoStream()
model = PoseEngine(model_path=server.config["MODEL_PATH"])
redis = RedisClient(
host=server.config["REDIS_HOST"],
port=server.config["REDIS_PORT"],
db=server.config["REDIS_DB"],
)
def gen(video, workout):
"""Streams and analyzes video contents while overlaying stats info
Args:
video: a VideoStream object.
workout: str, a workout name or "None".
Yields:
bytes, JPEG-encoded frames formatted for a multipart HTTP response
"""
if workout != "None":
# Initiates the Workout object from the workout name
workout = WORKOUTS[workout]()
workout.setup(redis=redis)
annotator = Annotator()
for output in video.update():
# Computes pose stats
workout.update(output["pose"])
output["workout"] = workout
# Annotates the image and encodes the raw RGB data into JPEG format
output["array"] = annotator.annotate(output)
img = cv2.cvtColor(output["array"], cv2.COLOR_RGB2BGR)
_, buf = cv2.imencode(".jpeg", img)
yield (
b"--frame\r\nContent-Type: image/jpeg\r\n\r\n"
+ buf.tobytes()
+ b"\r\n\r\n"
)
else:
# Renders a blurring effect while on standby with no workout
for output in video.update():
img = cv2.blur(output["array"], (32, 32))
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
ret, buf = cv2.imencode(".jpeg", img)
yield (
b"--frame\r\nContent-Type: image/jpeg\r\n\r\n"
+ buf.tobytes()
+ b"\r\n\r\n"
)
@app.callback(
[Output("videostream", "src"), Output("workout_name", "children")],
[Input("workout-dropdown", "value")],
)
def start_workout(workout):
if workout is not None:
if workout == "random":
workout = random.choice(list(WORKOUTS))
workout_name = WORKOUTS[workout].name
session["workout"] = workout_name
else:
workout_name = "Select a workout to get started."
session["workout"] = None
logger.info(f'Current workout: {session.get("workout")}')
return f"/videostream/{workout}", workout_name
@app.callback(
Output("workout-dropdown", "value"),
[Input("workout-stop-btn", "n_clicks")],
[State("workout-dropdown", "value")],
)
def stop_workout(n_clicks, workout):
if workout is not None:
ws = WorkoutSession(
user_name=session.get("user_name"),
workout=session.get("workout"),
reps=redis.get("reps"),
pace=redis.get("pace"),
)
db.session.add(ws)
db.session.commit()
logger.info(f"{ws} inserted into db")
return None
@app.callback(
Output("leaderboard-graph", "figure"),
[Input("update-leaderboard-btn", "n_clicks")],
[State("workout-dropdown", "value")],
)
def update_leaderboard_graph(n_clicks, workout):
if n_clicks > 0:
current_time = datetime.datetime.utcnow()
a_week_ago = current_time - datetime.timedelta(weeks=1)
query = (
db.session.query(
WorkoutSession.user_name,
WorkoutSession.workout,
db.func.sum(WorkoutSession.reps).label("reps"),
)
.filter(WorkoutSession.created_date >= a_week_ago)
.group_by(WorkoutSession.user_name, WorkoutSession.workout)
.order_by(db.func.sum(WorkoutSession.reps).desc())
.all()
)
df = pd.DataFrame(query, columns=["user_name", "workout", "reps"])
layout = {
"barmode": "stack",
"margin": {"l": 0, "r": 0, "b": 0, "t": 40},
"autosize": True,
"font": {"family": "Comfortaa", "color": COLORS["text"], "size": 10},
"plot_bgcolor": COLORS["graph_bg"],
"paper_bgcolor": COLORS["graph_bg"],
"xaxis": {
"ticks": "",
"showgrid": False,
"title": "",
"automargin": True,
"zeroline": False,
},
"yaxis": {
"showgrid": False,
"title": "",
"automargin": True,
"categoryorder": "total ascending",
"linewidth": 1,
"linecolor": "#282828",
"zeroline": False,
},
"title": {
"text": "Last 7 Days",
"y": 0.9,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
"legend": {
"x": 1.0,
"y": -0.2,
"xanchor": "right",
"yanchor": "top",
"title": "",
"orientation": "h",
"itemclick": "toggle",
"itemdoubleclick": "toggleothers",
},
"showlegend": True,
}
fig = px.bar(
df,
x="reps",
y="user_name",
color="workout",
orientation="h",
color_discrete_sequence=px.colors.qualitative.Plotly,
)
fig.update_layout(layout)
fig.update_traces(marker_line_width=0, width=0.5)
return fig
else:
return {
"data": [],
"layout": {
"plot_bgcolor": COLORS["graph_bg"],
"paper_bgcolor": COLORS["graph_bg"],
"xaxis": {
"showgrid": False,
"showline": False,
"zeroline": False,
"showticklabels": False,
},
"yaxis": {
"showgrid": False,
"showline": False,
"zeroline": False,
"showticklabels": False,
},
},
}
@server.route("/videostream/<workout>", methods=["GET"])
def videostream(workout):
user_name = session.get("user_name")
logger.info(f"Current player: {user_name}")
return Response(
gen(video, workout), mimetype="multipart/x-mixed-replace; boundary=frame"
)
@app.callback(
[
Output("live-update-graph", "extendData"),
Output("indicator-reps", "children"),
Output("indicator-pace", "children"),
],
[Input("live-update-interval", "n_intervals")],
)
def update_workout_graph(n_intervals):
inference_time = redis.lpop("inference_time")
pose_score = redis.lpop("pose_score")
data = [{"y": [[inference_time], [pose_score]]}, [0, 1], 200]
reps = redis.get("reps")
pace = redis.get("pace")
return data, f"{reps:.0f}", f"{pace*30:.1f}" if pace > 0 else "/"
@server.route("/user_login", methods=["POST"])
def user_login():
user_name = request.form.get("user_name_form")
session["user_name"] = user_name
logger.info(f"Player {user_name} logged in")
if video.closed is None or video.closed:
video.setup(model=model, redis=redis)
video.start()
return redirect("/home")
@server.route("/user_logout")
def user_logout():
user_name = session.pop("user_name")
if user_name is not None:
session.clear()
logger.info(f"Player {user_name} logged out")
if not video.closed:
video.close()
return redirect("/")
@app.callback(Output("page-content", "children"), [Input("url", "pathname")])
def display_page(pathname):
if pathname == "/home":
current_user = session.get("user_name")
return layout_homepage(current_user)
else:
return layout_login()
return app
|
tests/test_proportional.py | AlessioMorale/luma.core | 114 | 12636825 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-18 <NAME> and contributors
# See LICENSE.rst for details.
import pytest
from luma.core.legacy.font import proportional, CP437_FONT
def test_narrow_char():
font = proportional(CP437_FONT)
assert font[ord('!')] == [6, 95, 95, 6, 0]
def test_wide_char():
font = proportional(CP437_FONT)
assert font[ord('W')] == CP437_FONT[ord('W')]
def test_space_char():
font = proportional(CP437_FONT)
assert font[ord(' ')] == [0] * 4
def test_doublequote_char():
font = proportional(CP437_FONT)
assert font[ord('"')] == [7, 7, 0, 7, 7, 0]
def test_trim_not_nonzero():
font = proportional(CP437_FONT)
assert font._trim([0, 0, 0, 0]) == []
def test_unicode_not_supported():
font = proportional(CP437_FONT)
with pytest.raises(IndexError) as ex:
font[ord("😀")]
assert str(ex.value) == 'Font does not have ASCII code: 128512'
|
mods/screenshot.py | devrix123/SillyRAT | 442 | 12636858 | import io
import pyscreenshot
class SCREENSHOT:
"""Captures the current screen and keeps the PNG-encoded bytes in memory."""
SC_DATA = b""
def __init__(self):
self.generate()
def generate(self):
# Grab the screen and encode it as PNG into an in-memory buffer.
obj = io.BytesIO()
im = pyscreenshot.grab()
im.save(obj, format="PNG")
self.SC_DATA = obj.getvalue()
def get_data(self):
# Return the raw PNG bytes of the last captured screenshot.
return self.SC_DATA |
scripts/automation/trex_control_plane/interactive/trex/astf/trex_astf_port.py | timgates42/trex-core | 956 | 12636902 | <reponame>timgates42/trex-core
from .topo import ASTFTopology
from ..common.trex_port import Port
class ASTFPort(Port):
def __init__(self, *a, **k):
Port.__init__(self, *a, **k)
self.topo = ASTFTopology()
self.service_mode = False
self.service_mode_filtered = False
self.service_mask = 0
def _check_astf_req(self, enabled, filtered, mask):
assert enabled or filtered, "Cannot turn off service mode in ASTF!"
assert mask & 0xFE, "Cannot turn off NO_TCP_UDP flag in ASTF!"
def set_service_mode(self, enabled, filtered, mask):
self.service_mode = enabled
self.service_mode_filtered = filtered
self.service_mask = mask
return self.ok()
def is_service_mode_on(self):
return self.service_mode
def is_service_filtered_mode_on(self):
return self.service_mode_filtered
def is_server(self):
return bool(self.port_id % 2)
def is_client(self):
return not self.is_server()
def _is_service_req(self):
return False |
tensorflow_io/python/experimental/sql_dataset_ops.py | lgeiger/io | 558 | 12636958 | <reponame>lgeiger/io<filename>tensorflow_io/python/experimental/sql_dataset_ops.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""SQLDataset"""
import tensorflow as tf
from tensorflow_io.python.ops import core_ops
class SQLIODataset(tf.data.Dataset):
"""SQLIODataset"""
def __init__(self, query, endpoint=None, spec=None, internal=True):
"""SQLIODataset."""
with tf.name_scope("SQLIODataset"):
assert internal
endpoint = endpoint or ""
resource, count, fields, dtypes = core_ops.io_sql_iterable_init(
query, endpoint
)
if spec is None:
fields = tf.unstack(fields)
dtypes = tf.unstack(dtypes)
spec = {
field.numpy().decode(): tf.TensorSpec(
[None], tf.as_dtype(dtype.numpy()), field.numpy().decode()
)
for (field, dtype) in zip(fields, dtypes)
}
else:
# Make sure shape is [None] and name is part of the spec
spec = {k: tf.TensorSpec([None], v.dtype, k) for k, v in spec.items()}
flatten = tf.nest.flatten(spec)
fields = [e.name for e in flatten]
dtypes = [e.dtype for e in flatten]
self._resource = resource
dataset = tf.data.Dataset.range(0, count)
def f(index):
return tf.nest.pack_sequence_as(
spec, core_ops.io_sql_iterable_read(resource, index, fields, dtypes)
)
dataset = dataset.map(f)
dataset = dataset.unbatch()
self._dataset = dataset
super().__init__(
self._dataset._variant_tensor
) # pylint: disable=protected-access
def _inputs(self):
return []
@property
def element_spec(self):
return self._dataset.element_spec
|
thirdparty/som/examples/peptides.py | mkashifn/celosia | 116 | 12636984 | <filename>thirdparty/som/examples/peptides.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from modlamp.sequences import Helices, Random, AMPngrams
from modlamp.descriptors import PeptideDescriptor
from modlamp.datasets import load_AMPvsTM
from som import SOM
# generate some virtual peptide sequences
libnum = 1000 # 1000 sequences per sublibrary
h = Helices(seqnum=libnum)
r = Random(seqnum=libnum)
n = AMPngrams(seqnum=libnum, n_min=4)
h.generate_sequences()
r.generate_sequences(proba='AMP')
n.generate_sequences()
# calculate molecular descirptors for the peptides
d = PeptideDescriptor(seqs=np.hstack((h.sequences, r.sequences, n.sequences)), scalename='pepcats')
d.calculate_crosscorr(window=7)
# train a som on the descriptors and print / plot the training error
som = SOM(x=12, y=12)
som.fit(data=d.descriptor, epochs=100000, decay='hill')
print("Fit error: %.4f" % som.error)
som.plot_error_history(filename="som_error.png")
# load known antimicrobial peptides (AMPs) and transmembrane sequences
dataset = load_AMPvsTM()
d2 = PeptideDescriptor(dataset.sequences, 'pepcats')
d2.calculate_crosscorr(7)
targets = np.array(libnum*[0] + libnum*[1] + libnum*[2] + 206*[3])
names = ['Helices', 'Random', 'nGrams', 'AMP']
# plot som maps with location of AMPs
som.plot_point_map(np.vstack((d.descriptor, d2.descriptor[206:])), targets, names, filename="peptidesom.png")
som.plot_density_map(np.vstack((d.descriptor, d2.descriptor)), filename="density.png")
som.plot_distance_map(colormap='Reds', filename="distances.png")
colormaps = ['Oranges', 'Purples', 'Greens', 'Reds']
for i, c in enumerate(set(targets)):
som.plot_class_density(np.vstack((d.descriptor, d2.descriptor)), targets, c, names, colormap=colormaps[i],
filename='class%i.png' % c)
# get neighboring peptides (AMPs / TMs) for a sequence of interest
my_d = PeptideDescriptor(seqs='GLFDIVKKVVGALLAG', scalename='pepcats')
my_d.calculate_crosscorr(window=7)
som.get_neighbors(datapoint=my_d.descriptor, data=d2.descriptor, labels=dataset.sequences, d=0)
|
preprocess/load_img.py | cvlab-tohoku/Dense-Co-Attention-Network-for-Visual-Question-Answering | 110 | 12637054 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import glob
import re
import sys
import cv2
import h5py
import torch
import numpy as np
import argparse
import json
from threading import Thread, Lock
if sys.version_info[0] == 2:
import Queue as queue
else:
import queue
folder_map = {
"train": ["train2014"],
"val": ["val2014"],
"trainval": ["train2014", "val2014"],
"test": ["test2015"],
}
def save_images(image_path, image_type, data_path, data_name, num_workers):
"""
    Process all of the images into a numpy array, then store them in a file.
--------------------
Arguments:
image_path (str): path points to images.
image_type (str): "train", "val", "trainval", or "test".
data_path (str): path points to the location which stores images.
data_name (str): name of stored file.
num_workers (int): number of threads used to load images.
"""
dataset = h5py.File(os.path.join(data_path, "%s_%s.h5" % (data_name, image_type)), "w")
q = queue.Queue()
images_idx = {}
images_path = []
lock = Lock()
for data in folder_map[image_type]:
folder = os.path.join(image_path, data)
images_path.extend(glob.glob(folder+"/*"))
pattern = re.compile(r"_([0-9]+).jpg")
for i, img_path in enumerate(images_path):
assert len(pattern.findall(img_path)) == 1, "More than one index found in an image path!"
idx = int(pattern.findall(img_path)[0])
images_idx[idx] = i
q.put((i, img_path))
assert len(images_idx) == len(images_path), "Duplicated indices are found!"
images = dataset.create_dataset("images", (len(images_path), 448, 448, 3), dtype=np.uint8)
def _worker():
while True:
i, img_path = q.get()
if i is None:
break
            img = cv2.cvtColor((cv2.resize(cv2.imread(img_path, cv2.IMREAD_COLOR), (448, 448))),
cv2.COLOR_BGR2RGB)
with lock:
if i % 1000 == 0:
print("processing %i/%i" % (i, len(images_path)))
images[i] = img
q.task_done()
for _ in range(num_workers):
thread = Thread(target=_worker)
thread.daemon = True
thread.start()
q.join()
print("Terminating threads...")
for _ in range(2*num_workers):
q.put((None, None))
torch.save(images_idx, os.path.join(data_path, "%s_%s.pt" % (data_name, image_type)))
dataset.close()
print("Finish saving images...")
def main(opt):
"""
Create file that stores images in "train", "val", "trainval", and "test" datasets.
"""
# transform = transforms.Compose([
# transforms.Scale(opt.size_scale),
# transforms.ToTensor(),
# transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# ])
# Process train images
print("Create train images dataset...")
save_images(opt.img_path, "train", opt.data_path, opt.data_name, opt.num_workers)
# Process val images
print("Create val images dataset...")
save_images(opt.img_path, "val", opt.data_path, opt.data_name, opt.num_workers)
# # Process trainval images
# print("Create trainval images dataset...")
# save_images(opt.img_path, "trainval", opt.data_path, opt.data_name, opt.num_workers)
# # Process test images
# print("Create test images dataset...")
# save_images(opt.img_path, "test", opt.data_path, opt.data_name, opt.num_workers)
print("Done!")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--img_path", default="/ceph/kien/data2.0")
parser.add_argument("--data_name", default="cocoimages")
parser.add_argument("--data_path", default="/ceph/kien/VQA/dataset")
parser.add_argument("--num_workers", type=int, default=8)
args = parser.parse_args()
params = vars(args)
print("Parsed input parameters:")
print(json.dumps(params, indent=2))
main(args) |
src/main/resources/assets/openpython/opos/v1.1/lib/micropython/io.py | fossabot/OpenPython | 1,556 | 12637057 |
from uio import *
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
|
tests/unit/utils/test_template.py | shintaii/flower | 4,474 | 12637075 |
import unittest
from flower.utils.template import humanize, format_time
class TestHumanize(unittest.TestCase):
def test_None(self):
self.assertEqual('', humanize(None))
def test_bool(self):
self.assertEqual(True, humanize(True))
self.assertEqual(False, humanize(False))
def test_numbers(self):
self.assertEqual(0, humanize(0))
self.assertEqual(3, humanize(3))
self.assertEqual(0.2, humanize(0.2))
def test_keywords(self):
self.assertEqual('SSL', humanize('ssl'))
self.assertEqual('SSL', humanize('SSL'))
self.assertEqual('URI', humanize('uri'))
self.assertEqual('URI', humanize('URI'))
self.assertEqual('UUID', humanize('uuid'))
self.assertEqual('UUID', humanize('UUID'))
self.assertEqual('ETA', humanize('eta'))
self.assertEqual('ETA', humanize('ETA'))
self.assertEqual('URL', humanize('url'))
self.assertEqual('URL', humanize('URL'))
self.assertEqual('args', humanize('args'))
self.assertEqual('kwargs', humanize('kwargs'))
def test_uuid(self):
uuid = '5cf83762-9507-4dc5-8e5a-ad730379b099'
self.assertEqual(uuid, humanize(uuid))
def test_sequences(self):
self.assertEqual('2, 3', humanize([2, 3]))
self.assertEqual('2, foo, 1.2', humanize([2, 'foo', 1.2]))
self.assertEqual([None, None], humanize([None, None]))
self.assertEqual([4, {1: 1}], humanize([4, {1: 1}]))
def test_time(self):
from pytz import utc
self.assertEqual(1343911558.305793, humanize(1343911558.305793))
self.assertEqual(format_time(1343911558.305793, utc),
humanize(1343911558.305793, type='time'))
def test_strings(self):
self.assertEqual('Max tasks per child',
humanize('max_tasks_per_child'))
self.assertEqual('URI prefix', humanize('uri_prefix'))
self.assertEqual('Max concurrency', humanize('max-concurrency'))
if __name__ == '__main__':
unittest.main()
|
gcsa/serializers/reminder_serializer.py | gaborantal/google-calendar-simple-api | 139 | 12637080 | from gcsa.reminders import Reminder, EmailReminder, PopupReminder
from .base_serializer import BaseSerializer
class ReminderSerializer(BaseSerializer):
type_ = Reminder
def __init__(self, reminder):
super().__init__(reminder)
@staticmethod
def _to_json(reminder: Reminder):
return {
'method': reminder.method,
'minutes': reminder.minutes_before_start
}
@staticmethod
def _to_object(json_reminder):
method = json_reminder['method']
if method == 'email':
return EmailReminder(int(json_reminder['minutes']))
elif method == 'popup':
return PopupReminder(int(json_reminder['minutes']))
else:
raise ValueError('Unexpected method "{}" for a reminder.'.format(method))
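# Illustrative round trip (added comment, not part of the original module);
# it relies only on the private helpers defined above, so no extra API is
# assumed:
#
#   reminder = EmailReminder(30)
#   as_json = ReminderSerializer._to_json(reminder)    # {'method': 'email', 'minutes': 30}
#   restored = ReminderSerializer._to_object(as_json)  # EmailReminder with 30 minutes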
|
python/keepsake/console.py | lambdaofgod/keepsake | 810 | 12637104 | import enum
from functools import wraps
import sys
from ._vendor.colors import color
# Parallel of go/pkg/console/
class Level(enum.Enum):
INFO = "INFO"
WARN = "WARN"
ERROR = "ERROR"
def info(s: str):
log(s, Level.INFO)
def warn(s: str):
log(s, Level.WARN)
def error(s: str):
log(s, Level.ERROR)
def log(s: str, level: Level):
# Add word wrapping, see https://github.com/replicate/keepsake/issues/348
prompt = "═══╡ "
continuation_prompt = " │ "
# We should support NO_COLOR, see https://github.com/replicate/keepsake/issues/349
if sys.stderr.isatty():
kwargs = {"style": "faint"}
if level == Level.WARN:
kwargs = {"fg": "yellow"}
elif level == Level.ERROR:
kwargs = {"fg": "red"}
prompt = color(prompt, **kwargs)
continuation_prompt = color(continuation_prompt, **kwargs)
for i, line in enumerate(s.split("\n")):
if i == 0:
print(prompt + line, file=sys.stderr)
else:
print(continuation_prompt + line, file=sys.stderr)
# Keepsake should never break your training
def catch_and_print_exceptions(msg=None, return_value=None):
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
if msg is not None:
error(f"{msg}: {str(e)}")
else:
error(str(e))
return return_value
return wrapper
return decorator
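# Example of the decorator above (added comment; `_log_metrics` is a made-up
# callback, not part of keepsake). The wrapped call reports the exception via
# error() and returns the fallback value instead of raising:
#
#   @catch_and_print_exceptions(msg="metrics logging failed", return_value=None)
#   def _log_metrics(step):
#       raise RuntimeError("simulated failure at step %d" % step)
#
#   assert _log_metrics(3) is None  # the error is printed, training continues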
|
rl_baselines/hyperparam_search.py | anonymous-authors-2018/robotics-repo | 524 | 12637116 | import argparse
import subprocess
import os
import shutil
import glob
import pprint
import math
import time
import pandas as pd
import numpy as np
import hyperopt
from rl_baselines.registry import registered_rl
from environments.registry import registered_env
from state_representation.registry import registered_srl
from srl_zoo.utils import printGreen
ITERATION_SCALE = 10000
MIN_ITERATION = 30000
class HyperParameterOptimizer(object):
def __init__(self, opt_param, train, seed=0):
"""
the base class for hyper parameter optimizer
:param opt_param: (dict) the parameters to optimize
:param train: (function (dict, int, int): float) the function that take:
- params: (dict) the hyper parameters to train with
- num_iters (int) the number of iterations to train (can be None)
- train_id: (int) the current iteration number in the hyperparameter search (can be None)
- returns: (float) the score of the training to minimize
:param seed: (int) the initial seed for the random number generator
"""
self.opt_param = opt_param
self.train = train
self.seed = seed
self.history = []
def run(self):
"""
run the hyper parameter search
"""
raise NotImplementedError
class Hyperband(HyperParameterOptimizer):
def __init__(self, opt_param, train, seed=0, max_iter=100, eta=3.0):
"""
        A Hyperband implementation; it is similar to a targeted random search
Hyperband: https://arxiv.org/abs/1603.06560
:param opt_param: (dict) the parameters to optimize
:param train: (function (dict, int, int): float) the function that take:
- params: (dict) the hyper parameters to train with
- num_iters (int) the number of iterations to train (can be None)
- train_id: (int) the current iteration number in the hyperparameter search (can be None)
- returns: (float) the score of the training to minimize
:param seed: (int) the initial seed for the random number generator
:param max_iter: (int) the maximum budget for hyperband's search
:param eta: (float) the reduction factor of the search
"""
super(Hyperband, self).__init__(opt_param, train, seed=seed)
self.max_iter = max_iter
self.eta = eta
self.max_steps = int(math.floor(math.log(self.max_iter) / math.log(self.eta)))
self.budget = (self.max_steps + 1) * self.max_iter
self.rng = np.random.RandomState(seed)
self.param_sampler = self._generate_sampler()
def _generate_sampler(self):
# will generate a hyperparameter sampler for Hyperband
def _sample():
params = {}
for name, (param_type, val) in self.opt_param.items():
if param_type == int:
params[name] = self.rng.randint(val[0], val[1])
elif param_type == float:
params[name] = self.rng.uniform(val[0], val[1])
elif isinstance(param_type, tuple) and param_type[0] == list:
params[name] = val[self.rng.randint(len(val))]
else:
raise AssertionError("Error: unknown type {}".format(param_type))
return params
return _sample
def run(self):
for step in reversed(range(self.max_steps + 1)):
max_n_param_sampled = int(math.ceil(self.budget / self.max_iter * self.eta**step / (step + 1)))
max_iters = self.max_iter * self.eta**(-step)
all_parameters = np.array([self.param_sampler() for _ in range(max_n_param_sampled)])
for i in range(step + 1):
printGreen("\npop_itt:{}/{}, itt:{}/{}, pop_size:{}".format(self.max_steps - step, self.max_steps + 1,
i, step+1, len(all_parameters)))
n_param_sampled = int(math.floor(max_n_param_sampled * self.eta**(-i)))
num_iters = max_iters * self.eta**i
losses = [self.train(params, num_iters, train_id) for train_id, params in enumerate(all_parameters)]
self.history.extend(zip([(params, num_iters) for params in all_parameters], losses))
all_parameters = all_parameters[np.argsort(losses)[:int(math.floor(n_param_sampled / self.eta))]]
return self.history[int(np.argmin([val[1] for val in self.history]))]
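# Worked example of the schedule implemented above (added comment; the numbers
# assume max_iter=100 and eta=3): max_steps = floor(log(100)/log(3)) = 4 and
# budget = 5 * 100 = 500. The most aggressive bracket (step=4) samples
# ceil(500/100 * 3**4 / 5) = 81 configurations at 100 * 3**-4 ~= 1.23 budget
# units each; every successive-halving round keeps roughly the best third and
# triples the per-configuration budget, so the population shrinks
# 81 -> 27 -> 9 -> 3 -> 1 while the per-configuration budget grows
# 1.23 -> 3.7 -> 11.1 -> 33.3 -> 100. makeRlTrainingFunction below converts
# these units into timesteps via ITERATION_SCALE, with a floor of MIN_ITERATION.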
class Hyperopt(HyperParameterOptimizer):
def __init__(self, opt_param, train, seed=0, num_eval=100):
"""
A Hyperopt implementation, it is similar to a bayesian search
Hyperopt: https://www.lri.fr/~kegl/research/PDFs/BeBaBeKe11.pdf
:param opt_param: (dict) the parameters to optimize
:param train: (function (dict, int, int): float) the function that take:
- params: (dict) the hyper parameters to train with
- num_iters (int) the number of iterations to train (can be None)
- train_id: (int) the current iteration number in the hyperparameter search (can be None)
- returns: (float) the score of the training to minimize
:param seed: (int) the initial seed for the random number generator
:param num_eval: (int) the number of evaluation to do
"""
super(Hyperopt, self).__init__(opt_param, train, seed=seed)
self.num_eval = num_eval
self.search_space = {}
for name, (param_type, val) in self.opt_param.items():
if param_type == int:
self.search_space[name] = hyperopt.hp.choice(name, np.arange(int(val[0]), int(val[1]), dtype=int))
elif param_type == float:
self.search_space[name] = hyperopt.hp.uniform(name, val[0], val[1])
elif isinstance(param_type, tuple) and param_type[0] == list:
self.search_space[name] = hyperopt.hp.choice(name, val)
else:
raise AssertionError("Error: unknown type {}".format(param_type))
def run(self):
trials = hyperopt.Trials()
hyperopt.fmin(fn=lambda kwargs: {'loss': self.train(kwargs), 'status': hyperopt.STATUS_OK},
space=self.search_space,
algo=hyperopt.tpe.suggest,
max_evals=self.num_eval,
trials=trials,
verbose=10)
# from the trials, get the values for every parameter
# set the number of iter to None as they are not changed in Hyperopt
# and zip the loss
self.history.extend(zip([(
{name: val[0] for name, val in params["misc"]["vals"].items()}, None)
for params in trials.trials], trials.losses()))
return self.history[int(np.argmin([val[1] for val in self.history]))]
def makeRlTrainingFunction(args, train_args):
"""
makes a training function for the hyperparam optimizers
:param args: (ArgumentParser) the optimizer arguments
:param train_args: (ArgumentParser) the remaining arguments
:return: (function (dict, int, int): float) the function that take:
- params: (dict) the hyper parameters to train with
- num_iters (int) the number of iterations to train (can be None)
- train_id: (int) the current iteration number in the hyperparameter search (can be None)
- returns: (float) the score of the training to minimize
"""
if args.verbose:
# None here means stdout of terminal for subprocess.call
stdout = None
else:
stdout = open(os.devnull, 'w')
def _train(params, num_iters=None, train_id=None):
# generate a print string
print_str = "\nID_num={}, "
format_args = []
if train_id is None:
if not hasattr(_train, "current_id"):
_train.current_id = 0
train_id = _train.current_id
_train.current_id += 1
format_args.append(train_id)
if num_iters is not None:
print_str += "Num-timesteps={}, "
format_args.append(int(max(MIN_ITERATION, num_iters * ITERATION_SCALE)))
print_str += "Param:"
printGreen(print_str.format(*format_args))
pprint.pprint(params)
# cleanup old files
if os.path.exists(args.log_dir):
shutil.rmtree(args.log_dir)
        # add the training args that were parsed for the hyperparam optimizers
if num_iters is not None:
loop_args = ['--num-timesteps', str(int(max(MIN_ITERATION, num_iters * ITERATION_SCALE)))]
else:
loop_args = ['--num-timesteps', str(int(args.num_timesteps))]
# redefine the hyperparam args for rl_baselines.train
if len(params) > 0:
loop_args.append("--hyperparam")
for param_name, param_val in params.items():
loop_args.append("{}:{}".format(param_name, param_val))
# call the training
ok = subprocess.call(['python', '-m', 'rl_baselines.train'] + train_args + loop_args, stdout=stdout)
if ok != 0:
# throw the error down to the terminal
            raise ChildProcessError("An error occurred, error code: {}".format(ok))
# load the logging of the training, and extract the reward
folders = glob.glob("{}/{}/{}/{}/*".format(args.log_dir, args.env, args.srl_model, args.algo))
assert len(folders) != 0, "Error: Could not find generated directory, halting {} search.".format(args.optimizer)
rewards = []
for monitor_path in glob.glob(folders[0] + "/*.monitor.csv"):
rewards.append(np.mean(pd.read_csv(monitor_path, skiprows=1)["r"][-10:]))
if np.isnan(rewards).any():
rewards = -np.inf
print("reward: ", np.mean(rewards))
# negative reward, as we are minimizing with hyperparameter search
return -np.mean(rewards)
return _train
def main():
parser = argparse.ArgumentParser(description="Hyperparameter search for implemented RL models")
parser.add_argument('--optimizer', default='hyperband', choices=['hyperband', 'hyperopt'], type=str,
help='The hyperparameter optimizer to choose from')
parser.add_argument('--algo', default='ppo2', choices=list(registered_rl.keys()), help='OpenAI baseline to use',
type=str)
parser.add_argument('--env', type=str, help='environment ID', default='KukaButtonGymEnv-v0',
choices=list(registered_env.keys()))
parser.add_argument('--seed', type=int, default=0, help='random seed (default: 0)')
parser.add_argument('--srl-model', type=str, default='raw_pixels', choices=list(registered_srl.keys()),
help='SRL model to use')
parser.add_argument('--num-timesteps', type=int, default=1e6, help='number of timesteps the baseline should run')
parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Display baseline STDOUT')
parser.add_argument('--max-eval', type=int, default=100, help='Number of evalutation to try for hyperopt')
args, train_args = parser.parse_known_args()
args.log_dir = "logs/_{}_search/".format(args.optimizer)
train_args.extend(['--srl-model', args.srl_model, '--seed', str(args.seed), '--algo', args.algo, '--env', args.env,
'--log-dir', args.log_dir, '--no-vis'])
    # verify the algorithm has defined it, and that it returns an expected value
try:
opt_param = registered_rl[args.algo][0].getOptParam()
assert opt_param is not None
    except (AttributeError, AssertionError):
raise AssertionError("Error: {} algo does not support hyperparameter search.".format(args.algo))
if args.optimizer == "hyperband":
opt = Hyperband(opt_param, makeRlTrainingFunction(args, train_args), seed=args.seed,
max_iter=args.num_timesteps // ITERATION_SCALE)
elif args.optimizer == "hyperopt":
opt = Hyperopt(opt_param, makeRlTrainingFunction(args, train_args), seed=args.seed, num_eval=args.max_eval)
else:
raise ValueError("Error: optimizer {} was defined but not implemented, Halting.".format(args.optimizer))
t_start = time.time()
opt.run()
all_params, loss = zip(*opt.history)
idx = np.argmin(loss)
opt_params, nb_iter = all_params[idx]
reward = loss[idx]
print('\ntime to run : {}s'.format(int(time.time() - t_start)))
print('Total nb. evaluations : {}'.format(len(all_params)))
if nb_iter is not None:
print('Best nb. of iterations : {}'.format(int(nb_iter)))
print('Best params : ')
pprint.pprint(opt_params)
print('Best reward : {:.3f}'.format(-reward))
param_dict, timesteps = zip(*all_params)
output = pd.DataFrame(list(param_dict))
# make sure we returned a timestep value to log, otherwise ignore
if not any([el is None for el in timesteps]):
output["timesteps"] = np.array(np.maximum(MIN_ITERATION, np.array(timesteps) * ITERATION_SCALE).astype(int))
output["reward"] = -np.array(loss)
output.to_csv("logs/{}_{}_{}_{}_seed{}_numtimestep{}.csv"
.format(args.optimizer, args.algo, args.env, args.srl_model, args.seed, args.num_timesteps))
if __name__ == '__main__':
main()
|
tests/test_PD002.py | dat-boris/pandas-vet | 136 | 12637135 |
# stdlib
import ast
from pandas_vet import VetPlugin
from pandas_vet import PD002
def test_PD002_pass():
"""
Test that using inplace=False explicitly does not result in an error.
"""
statement = """df.drop(['a'], axis=1, inplace=False)"""
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = []
assert actual == expected
def test_PD002_fail():
"""
Test that using inplace=True results in an error.
"""
statement = """df.drop(['a'], axis=1, inplace=True)"""
tree = ast.parse(statement)
actual = list(VetPlugin(tree).run())
expected = [PD002(1, 0)]
assert actual == expected
|
usaspending_api/references/migrations/0047_add_tas.py | g4brielvs/usaspending-api | 217 | 12637143 | # Generated by Django 2.2.10 on 2020-06-25 19:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_delete_appropriationaccountbalancesquarterly'),
('references', '0046_sf_balances_table'),
]
operations = [
migrations.AddField(
model_name='gtassf133balances',
name='tas_rendering_label',
field=models.TextField(db_index=True, null=True),
),
migrations.AddField(
model_name='gtassf133balances',
name='treasury_account_identifier',
field=models.ForeignKey(db_column='treasury_account_identifier', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='gtas', to='accounts.TreasuryAppropriationAccount'),
),
migrations.AlterUniqueTogether(
name='gtassf133balances',
unique_together={('fiscal_year', 'fiscal_period', 'disaster_emergency_fund_code', 'tas_rendering_label')},
),
]
|
tests/unit/pytorch/distributions/test_normal.py | chiragnagpal/probflow | 134 | 12637182 | import numpy as np
import pytest
import torch
from probflow.distributions import Normal
tod = torch.distributions
def is_close(a, b, tol=1e-3):
return np.abs(a - b) < tol
def test_Normal():
"""Tests Normal distribution"""
# Create the distribution
dist = Normal()
# Check default params
assert dist.loc == 0
assert dist.scale == 1
# Call should return backend obj
assert isinstance(dist(), tod.normal.Normal)
# Test methods
npdf = lambda x, m, s: (
1.0
/ np.sqrt(2 * np.pi * s * s)
* np.exp(-np.power(x - m, 2) / (2 * s * s))
)
assert is_close(dist.prob(0).numpy(), npdf(0, 0, 1))
assert is_close(dist.prob(1).numpy(), npdf(1, 0, 1))
assert is_close(dist.log_prob(0).numpy(), np.log(npdf(0, 0, 1)))
assert is_close(dist.log_prob(1).numpy(), np.log(npdf(1, 0, 1)))
assert dist.mean().numpy() == 0.0
# Test sampling
samples = dist.sample()
assert isinstance(samples, torch.Tensor)
assert samples.ndim == 0
samples = dist.sample(10)
assert isinstance(samples, torch.Tensor)
assert samples.ndim == 1
assert samples.shape[0] == 10
# Should be able to set params
dist = Normal(loc=3, scale=2)
assert dist.loc == 3
assert dist.scale == 2
# But only with Tensor-like objs
with pytest.raises(TypeError):
dist = Normal(loc="lalala", scale="lalala")
with pytest.raises(TypeError):
dist = Normal(loc=0, scale="lalala")
with pytest.raises(TypeError):
dist = Normal(loc="lalala", scale=1)
|
python/sparsemap/layers_pt/tests/test_seq_layer.py | mikejqzhang/sparsemap | 101 | 12637239 | from .. import seq_layer
import torch
from torch.autograd import gradcheck, Variable
def test_seq_sparse_decode():
torch.manual_seed(2)
n_vars = 4
n_states = 3
for _ in range(20):
sequence_smap = seq_layer.SequenceSparseMarginals(max_iter=1000)
unary = Variable(torch.randn(n_vars, n_states), requires_grad=True)
additionals = Variable(torch.randn(2 * n_states +
(n_vars - 1) * n_states ** 2),
requires_grad=True)
res = gradcheck(sequence_smap, (unary, additionals), eps=1e-4, atol=1e-3)
print(res)
assert res
def test_seq_dist_sparse_decode():
torch.manual_seed(42)
n_vars = 4
n_states = 3
bandwidth = 3
for _ in range(20):
seq_dist_smap = seq_layer.SequenceDistanceSparseMarginals(bandwidth)
unary = Variable(torch.randn(n_vars, n_states), requires_grad=True)
additionals = Variable(torch.randn(1 + 4 * bandwidth),
requires_grad=True)
res = gradcheck(seq_dist_smap, (unary, additionals), eps=1e-4, atol=1e-3)
print(res)
assert res
|
android/app/src/main/python/electroncash_gui/android/strings.py | christroutner/Electron-Cash | 208 | 12637247 | # This file lists translatable strings used in the Android app which don't appear anywhere else
# in the Electron Cash repository. Some of them only differ in capitalization or punctuation:
# see https://medium.com/@jsaito/making-a-case-for-letter-case-19d09f653c98
#
# Please keep the strings in alphabetical order.
# This file is never actually imported, but keep syntax checkers happy.
from gettext import gettext as _, ngettext
ngettext("%d address", "%d addresses", 1)
_("(%1$d of %2$d)")
_("Are you sure you want to delete your wallet \'%s\'?")
_("BIP39 seed")
_("Block explorer")
_("%s bytes")
_("Cannot process a URI while this dialog is open.")
_("Cannot specify private keys and addresses in the same wallet.")
_("Change password")
_("Close wallet")
_("Confirm password")
_("Console")
_("Copyright © 2017-2022 Electron Cash LLC and the Electron Cash developers.")
_("Current password")
_("Cosigner %d")
_("Delete wallet")
_("Derivation invalid")
_("Disconnect")
_("Do you want to close this wallet?")
_("Enter password")
_("Export wallet")
_("Filename is too long")
_("Filenames cannot contain the '/' character")
_("For support, please visit us on <a href='https://github.com/Electron-Cash/Electron-Cash/issues'>"
"GitHub</a> or on <a href='https://t.me/electroncashwallet'>Telegram</a>.")
_("ID")
_("Import addresses or private keys")
_("Invalid address")
_("Load transaction")
_("Made with <a href='https://chaquo.com/chaquopy'>Chaquopy</a>, the Python SDK for Android.")
_("Master public key")
_("Master public keys")
_("New password")
_("New wallet")
_("No wallet is open.")
_("No wallet")
_("Not a valid address or private key: '%s'")
_("Passphrase")
_("Paste or scan a transaction in hex format:")
_("Press the menu button above to open or create one.")
_("Rename wallet")
_("Request")
_("Restore from seed")
_("Scan QR")
_("Sign transaction")
_("Show seed")
_("Size")
_("Signed transaction")
ngettext("Sweep %d input", "Sweep %d inputs", 1)
_("Transaction not found")
_("%1$d tx (%2$d unverified)")
_("Use a master key")
_("Wallet information")
_("Wallet exported successfully")
_("Wallet renamed successfully")
_("Wallet seed")
_("You don't have any contacts.")
|
src/pfun/functions.py | suned/pfun | 126 | 12637251 | import functools
import inspect
from typing import Any, Callable, Generic, Tuple, TypeVar
from .immutable import Immutable
A = TypeVar('A')
B = TypeVar('B')
C = TypeVar('C')
def identity(v: A) -> A:
"""
The identity function. Just gives back its argument
Example:
>>> identity('value')
'value'
Args:
v: The value to get back
Return:
`v`
"""
return v
Unary = Callable[[A], B]
Predicate = Callable[[A], bool]
class Always(Generic[A], Immutable):
"""
A Callable that always returns the same value
regardless of the arguments
Example:
>>> f = Always(1)
>>> f(None)
1
>>> f('')
1
>>> "... and so on..."
"""
value: A
def __call__(self, *args, **kwargs) -> A:
return self.value
def always(value: A) -> Callable[..., A]:
"""
Get a function that always returns `value`
Example:
>>> f = always(1)
>>> f(None)
1
>>> f('')
1
>>> "... and so on..."
Args:
value: The value to return always
Return:
function that always returns `value`
"""
return Always(value)
class Composition(Immutable):
functions: Tuple[Callable, ...]
def __repr__(self) -> str:
functions_repr = ', '.join(repr(f) for f in self.functions)
return f'compose({functions_repr})'
def __call__(self, *args, **kwargs):
fs = reversed(self.functions)
first, *rest = fs
last_result = first(*args, **kwargs)
for f in rest:
last_result = f(last_result)
return last_result
def compose(
f: Callable[[Any], Any],
g: Callable[[Any], Any],
*functions: Callable[[Any], Any]
) -> Callable[[Any], Any]:
"""
Compose functions from left to right
Example:
>>> f = lambda v: v * 2
>>> g = compose(str, f)
>>> g(3)
"6"
Args:
f: the outermost function in the composition
g: the function to be composed with f
functions: functions to be composed with `f` \
and `g` from left to right
Return:
`f` composed with `g` composed with `functions` from left to right
"""
fs: Tuple[Callable, ...] = ()
for h in (f, g) + functions:
if isinstance(h, Composition):
fs += h.functions
else:
fs += (h,)
return Composition(fs)
def pipeline(
first: Callable[[Any], Any],
second: Callable[[Any], Any],
*rest: Callable[[Any], Any]
):
"""
Compose functions from right to left
Example:
>>> f = lambda v: v * 2
>>> g = pipeline(f, str)
>>> g(3)
"6"
Args:
first: the innermost function in the composition
        second: the function to compose with `first`
        rest: functions to compose with `first` and \
`second` from right to left
Return:
`rest` composed from right to left, composed with \
`second` composed with `first`
"""
return compose(*reversed(rest), second, first)
class Curry:
_f: Callable
def __init__(self, f: Callable):
functools.wraps(f)(self)
self._f = f # type: ignore
def __repr__(self):
return f'curry({repr(self._f)})'
def __call__(self, *args, **kwargs):
signature = inspect.signature(self._f)
bound = signature.bind_partial(*args, **kwargs)
bound.apply_defaults()
arg_names = {a for a in bound.arguments.keys()}
parameters = {p for p in signature.parameters.keys()}
if parameters - arg_names == set():
return self._f(*args, **kwargs)
if isinstance(self._f, functools.partial):
partial = functools.partial(
self._f.func,
*(self._f.args + args),
**self._f.keywords,
**kwargs
)
else:
partial = functools.partial(self._f, *args, **kwargs)
return Curry(partial)
def curry(f: Callable) -> Callable:
"""
Get a version of ``f`` that can be partially applied
Example:
>>> f = lambda a, b: a + b
>>> f_curried = curry(f)
>>> f_curried(1)
functools.partial(<function <lambda> at 0x1051f0950>, a=1)
>>> f_curried(1)(1)
2
Args:
f: The function to curry
Returns:
Curried version of ``f``
"""
@functools.wraps(f)
def decorator(*args, **kwargs):
return Curry(f)(*args, **kwargs)
return decorator
def flip(f: Callable) -> Callable:
"""
Reverse the order of positional arguments of `f`
Example:
>>> f = lambda a, b, c: (a, b, c)
>>> flip(f)('a', 'b', 'c')
('c', 'b', 'a')
Args:
f: Function to flip positional arguments of
Returns:
Function with positional arguments flipped
"""
return curry(lambda *args, **kwargs: f(*reversed(args), **kwargs))
__all__ = [
'curry', 'always', 'compose', 'pipeline', 'identity', 'Unary', 'Predicate'
]
|
examples/python/gee_score_test_simulation.py | CCHiggins/statsmodels | 6,931 | 12637257 | #!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook gee_score_test_simulation.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # GEE score tests
#
# This notebook uses simulation to demonstrate robust GEE score tests.
# These tests can be used in a GEE analysis to compare nested hypotheses
# about the mean structure. The tests are robust to misspecification of
# the working correlation model, and to certain forms of misspecification of
# the variance structure (e.g. as captured by the scale parameter in a
# quasi-Poisson analysis).
#
# The data are simulated as clusters, where there is dependence within but
# not between clusters. The cluster-wise dependence is induced using a
# copula approach. The data marginally follow a negative binomial
# (gamma/Poisson) mixture.
#
# The level and power of the tests are considered below to assess the
# performance of the tests.
import pandas as pd
import numpy as np
from scipy.stats.distributions import norm, poisson
import statsmodels.api as sm
import matplotlib.pyplot as plt
# The function defined in the following cell uses a copula approach to
# simulate correlated random values that marginally follow a negative
# binomial distribution. The input parameter `u` is an array of values in
# (0, 1). The elements of `u` must be marginally uniformly distributed on
# (0, 1). Correlation in `u` will induce correlations in the returned
# negative binomial values. The array parameter `mu` gives the marginal
# means, and the scalar parameter `scale` defines the mean/variance
# relationship (the variance is `scale` times the mean). The lengths of `u`
# and `mu` must be the same.
def negbinom(u, mu, scale):
p = (scale - 1) / scale
r = mu * (1 - p) / p
x = np.random.gamma(r, p / (1 - p), len(u))
return poisson.ppf(u, mu=x)
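# Quick marginal sanity check (added illustration, not part of the original
# notebook): with independent uniform inputs, the draws should have mean close
# to `mu` and variance close to `scale * mu` (about 5 and 50 here). A private
# RandomState is used for the uniforms so the global NumPy stream is left
# mostly untouched.
_check_rs = np.random.RandomState(12345)
_check_u = _check_rs.uniform(size=50000)
_check_y = negbinom(_check_u, mu=np.full(50000, 5.0), scale=10)
print("negbinom marginal check: mean=%.2f (expect ~5), var=%.1f (expect ~50)" %
      (_check_y.mean(), _check_y.var()))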
# Below are some parameters that govern the data used in the simulation.
# Sample size
n = 1000
# Number of covariates (including intercept) in the alternative hypothesis
# model
p = 5
# Cluster size
m = 10
# Intraclass correlation (controls strength of clustering)
r = 0.5
# Group indicators
grp = np.kron(np.arange(n / m), np.ones(m))
# The simulation uses a fixed design matrix.
# Build a design matrix for the alternative (more complex) model
x = np.random.normal(size=(n, p))
x[:, 0] = 1
# The null design matrix is nested in the alternative design matrix. It
# has rank two less than the alternative design matrix.
x0 = x[:, 0:3]
# The GEE score test is robust to dependence and overdispersion. Here we
# set the overdispersion parameter. The variance of the negative binomial
# distribution for each observation is equal to `scale` times its mean
# value.
# Scale parameter for negative binomial distribution
scale = 10
# In the next cell, we set up the mean structures for the null and
# alternative models
# The coefficients used to define the linear predictors
coeff = [[4, 0.4, -0.2], [4, 0.4, -0.2, 0, -0.04]]
# The linear predictors
lp = [np.dot(x0, coeff[0]), np.dot(x, coeff[1])]
# The mean values
mu = [np.exp(lp[0]), np.exp(lp[1])]
# Below is a function that carries out the simulation.
# hyp = 0 is the null hypothesis, hyp = 1 is the alternative hypothesis.
# cov_struct is a statsmodels covariance structure
def dosim(hyp, cov_struct=None, mcrep=500):
# Storage for the simulation results
scales = [[], []]
# P-values from the score test
pv = []
# Monte Carlo loop
for k in range(mcrep):
# Generate random "probability points" u that are uniformly
# distributed, and correlated within clusters
z = np.random.normal(size=n)
u = np.random.normal(size=n // m)
u = np.kron(u, np.ones(m))
z = r * z + np.sqrt(1 - r**2) * u
u = norm.cdf(z)
# Generate the observed responses
y = negbinom(u, mu=mu[hyp], scale=scale)
# Fit the null model
m0 = sm.GEE(y,
x0,
groups=grp,
cov_struct=cov_struct,
family=sm.families.Poisson())
r0 = m0.fit(scale='X2')
scales[0].append(r0.scale)
# Fit the alternative model
m1 = sm.GEE(y,
x,
groups=grp,
cov_struct=cov_struct,
family=sm.families.Poisson())
r1 = m1.fit(scale='X2')
scales[1].append(r1.scale)
# Carry out the score test
st = m1.compare_score_test(r0)
pv.append(st["p-value"])
pv = np.asarray(pv)
rslt = [np.mean(pv), np.mean(pv < 0.1)]
return rslt, scales
# Run the simulation using the independence working covariance structure.
# We expect the mean to be around 0.5 under the null hypothesis, and much
# lower under the alternative hypothesis. Similarly, we expect that under
# the null hypothesis, around 10% of the p-values are less than 0.1, and a
# much greater fraction of the p-values are less than 0.1 under the
# alternative hypothesis.
rslt, scales = [], []
for hyp in 0, 1:
s, t = dosim(hyp, sm.cov_struct.Independence())
rslt.append(s)
scales.append(t)
rslt = pd.DataFrame(rslt, index=["H0", "H1"], columns=["Mean", "Prop(p<0.1)"])
print(rslt)
# Next we check to make sure that the scale parameter estimates are
# reasonable. We are assessing the robustness of the GEE score test to
# dependence and overdispersion, so here we are confirming that the
# overdispersion is present as expected.
_ = plt.boxplot([scales[0][0], scales[0][1], scales[1][0], scales[1][1]])
plt.ylabel("Estimated scale")
# Next we conduct the same analysis using an exchangeable working
# correlation model. Note that this will be slower than the example above
# using independent working correlation, so we use fewer Monte Carlo
# repetitions.
rslt, scales = [], []
for hyp in 0, 1:
s, t = dosim(hyp, sm.cov_struct.Exchangeable(), mcrep=100)
rslt.append(s)
scales.append(t)
rslt = pd.DataFrame(rslt, index=["H0", "H1"], columns=["Mean", "Prop(p<0.1)"])
print(rslt)
|
owo.py | ThePlasmaRailgun/owoScript | 143 | 12637291 |
from owoi import run_owo_pseudocode
from owoc import owos_to_code
import argparse
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Run OwO code in OwO form.')
argparser.add_argument('file', type=argparse.FileType('r'), help='File with OwO code')
argparser.add_argument('--pseudo', '-p', action='store_true', default=False,
help='Whether to run OwO bytecode or pseudocode. Use this for testing without having to compile')
argparser.add_argument('--debug', '-d', action='store_true', default=False,
help='Debug mode.')
args = argparser.parse_args()
code = args.file.read()
if not args.pseudo:
decompiled_code = owos_to_code(code)
else:
decompiled_code = code
run_owo_pseudocode(decompiled_code, args.debug) |
Trinkey_QT2040_Enviro_Gadget/u2if/enviro_u2if.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 12637293 |
# SPDX-FileCopyrightText: 2021 <NAME> for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import board
import adafruit_scd4x
from adafruit_bme280 import basic as adafruit_bme280
scd = adafruit_scd4x.SCD4X(board.I2C())
scd.start_periodic_measurement()
bme = adafruit_bme280.Adafruit_BME280_I2C(board.I2C())
while True:
time.sleep(5)
print("CO2 =", scd.CO2)
print("Pressure = {:.1f} hPa".format(bme.pressure))
print("Temperature = {:.1f} degC".format(bme.temperature))
print("Humidity = {:.1f}%".format(bme.humidity))
|
GeneratorInterface/MCatNLOInterface/test/testSourceAndHadronizer_cfg.py | ckamtsikis/cmssw | 852 | 12637309 |
import FWCore.ParameterSet.Config as cms
process = cms.Process('Test')
process.load('Configuration.StandardSequences.Services_cff')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.source = cms.Source("MCatNLOSource",
fileNames = cms.untracked.vstring('file:Z.events'),
processCode = cms.int32(-11361),
skipEvents=cms.untracked.uint32(0)
)
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(-1))
process.generator = cms.EDFilter("Herwig6HadronizerFilter",
comEnergy = cms.double(10000.0),
useJimmy = cms.bool(False),
doMPInteraction = cms.bool(False),
herwigHepMCVerbosity = cms.untracked.bool(False),
herwigVerbosity = cms.untracked.int32(1),
printCards = cms.untracked.bool(True),
maxEventsToPrint = cms.untracked.int32(0),
crossSection = cms.untracked.double(-1.0),
filterEfficiency = cms.untracked.double(1.0),
emulatePythiaStatusCodes = cms.untracked.bool(False),
numTrialsMPI = cms.untracked.int32(1),
HerwigParameters = cms.PSet(
parameterSets = cms.vstring(
'herwigMcatnlo'
),
herwigMcatnlo = cms.vstring(
'PTMIN = 0.5 ! minimum pt in hadronic jet'
)
)
)
process.RandomNumberGeneratorService.generator = cms.PSet(
initialSeed = cms.untracked.uint32(123456789),
engineName = cms.untracked.string('HepJamesRandom')
)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.threshold = 'INFO'
process.ProductionFilterSequence = cms.Sequence(process.generator)
process.generation_step = cms.Path(process.ProductionFilterSequence)
process.output = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('mcatnloZee.root'),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
process.output_step = cms.EndPath(process.output)
|
videoanalyst/pipeline/utils/__init__.py | TragedyN/SiamFCpp | 737 | 12637317 |
# -*- coding: utf-8 -*
from videoanalyst.evaluation.vot_benchmark.bbox_helper import cxy_wh_2_rect
from .bbox import (clip_bbox, cxywh2xywh, cxywh2xyxy, xywh2cxywh, xywh2xyxy,
xyxy2cxywh, xyxy2xywh)
from .crop import get_axis_aligned_bbox, get_crop, get_subwindow_tracking
from .misc import imarray_to_tensor, tensor_to_numpy
__all__ = [
clip_bbox, cxy_wh_2_rect, cxywh2xywh, cxywh2xyxy, xywh2cxywh, xywh2cxywh,
xyxy2cxywh, xyxy2xywh, xywh2xyxy, get_axis_aligned_bbox, get_crop,
get_subwindow_tracking, imarray_to_tensor, tensor_to_numpy
]
|
tensorflow_gnn/keras/layers/padding_ops_test.py | tensorflow/gnn | 611 | 12637321 | """Tests for padding_ops Keras layers."""
import enum
import os
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_gnn.graph import adjacency as adj
from tensorflow_gnn.graph import graph_tensor as gt
from tensorflow_gnn.graph import preprocessing_common
from tensorflow_gnn.keras import keras_tensors # For registration. pylint: disable=unused-import
from tensorflow_gnn.keras.layers import padding_ops
class ReloadModel(int, enum.Enum):
"""Controls how to reload a model for further testing after saving."""
SKIP = 0
SAVED_MODEL = 1
KERAS = 2
class PadToTotalSizesTest(tf.test.TestCase, parameterized.TestCase):
def _make_test_graph(self):
return gt.GraphTensor.from_pieces(
context=gt.Context.from_fields(
features={"label": tf.constant([42])}),
node_sets={"nodes": gt.NodeSet.from_fields(
sizes=tf.constant([1]),
features={"feature": tf.constant([[1., 2.]])})},
edge_sets={"edges": gt.EdgeSet.from_fields(
sizes=tf.constant([1]),
adjacency=adj.Adjacency.from_indices(("nodes", tf.constant([0])),
("nodes", tf.constant([0]))),
features={"weight": tf.constant([1.0])})})
@parameterized.named_parameters(
("", ReloadModel.SKIP),
("Restored", ReloadModel.SAVED_MODEL),
("RestoredKeras", ReloadModel.KERAS))
def test(self, reload_model):
input_graph = self._make_test_graph()
sc = preprocessing_common.SizeConstraints(
total_num_components=2,
total_num_nodes={"nodes": 3},
total_num_edges={"edges": tf.constant(4)}) # Test conversion to int.
pad = padding_ops.PadToTotalSizes(sc)
inputs = tf.keras.layers.Input(type_spec=input_graph.spec)
outputs = pad(inputs)
model = tf.keras.Model(inputs, outputs)
if reload_model:
export_dir = os.path.join(self.get_temp_dir(), "padding-model")
model.save(export_dir, include_optimizer=False)
if reload_model == ReloadModel.KERAS:
model = tf.keras.models.load_model(export_dir)
else:
model = tf.saved_model.load(export_dir)
graph, mask = model(input_graph)
self.assertAllEqual([True, False], mask)
self.assertAllEqual(2, graph.num_components)
self.assertAllEqual([42, 0], graph.context["label"])
nodes = graph.node_sets["nodes"]
self.assertAllEqual([1, 2], nodes.sizes)
self.assertAllEqual([[1., 2.], [0., 0.], [0., 0.]], nodes["feature"])
edges = graph.edge_sets["edges"]
self.assertAllEqual([1, 3], edges.sizes)
self.assertAllEqual([1., 0., 0., 0.], edges["weight"])
if __name__ == "__main__":
tf.test.main()
|
testfiles/cmd/cmd.py | mindriot101/pyq | 144 | 12637363 |
class Foo(object):
pass
@decorator
class Bar(object):
def foo(self):
pass
def baz(arg1, arg2):
pass
foo() | bar()
|
passwords/passfilter.py | m00tiny/scripts | 877 | 12637376 |
#!/usr/bin/env python
# Copyright (c) 2012, AverageSecurityGuy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of AverageSecurityGuy nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import argparse
import re
import sys
#------------------------------------------------------------------------------
# Function Definitions
#------------------------------------------------------------------------------
def parse_word(word, s):
"""Parses the word and counts the number of digits, lowercase letters,
uppercase letters, and symbols. Returns a dictionary with the results.
If any character in the word is not in the set of digits, lowercase
letters, uppercase letters, or symbols it is marked as a bad character.
Words with bad characters are not output."""
count = {'d': 0, 'l': 0, 'u': 0, 's': 0, 'x':0}
d = '0123456789'
l = 'abcdefghijklmnopqrstuvwxyz'
u = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for c in word:
if c in d:
count['d'] += 1
elif c in l:
count['l'] += 1
elif c in u:
count['u'] += 1
elif c in s:
count['s'] += 1
else:
count['x'] += 1
return count
def parse_requirements(r):
"""Determine which characters are required and the number of them that
are required."""
req = {'d': 0, 'l': 0, 'u': 0, 's': 0}
for c in r:
if c == 'd':
req['d'] += 1
elif c == 'l':
req['l'] += 1
elif c == 'u':
req['u'] += 1
elif c == 's':
req['s'] += 1
else:
continue
return req
def complex_pass(count):
"""Windows complexity requires a password to contain three of the four
groups: digits, lowercase letters, uppercase letters, or symbols."""
if count['d'] and count['l'] and count['u']:
return True
elif count['d'] and count['l'] and count['s']:
return True
elif count['d'] and count['u'] and count['s']:
return True
elif count['l'] and count['u'] and count['s']:
return True
else:
return False
def meets_requirements(count, r):
"""Does the password have enough of each type of character to meet the
requirements?"""
if (count['d'] >= r['d'] and count['l'] >= r['l'] and
count['u'] >= r['u'] and count['s'] >= r['s']):
return True
else:
return False
#------------------------------------------------------------------------------
# Main Program
#------------------------------------------------------------------------------
desc = """Passfilter.py reads a file or stdin and returns words that meet the
defined requirements. For most password policies the set of allowed letters
and numbers is the same. The set of allowed symbols varies widely between
policies. Passfilter.py defines a default set of symbols which can be
overridden using the -s flag.
Examples:
Return all words 3 to 10 characters long.
passfilter.py -f wordlist
Return all words 3 to 10 characters long that meet the windows complexity
requirements.
passfilter.py -w -f wordlist
Return all words 5 to 9 characters long that have at least two lowercase
letters and at least one digit.
passfilter.py -m 5 -x 9 -r lld -f wordlist
"""
parser = argparse.ArgumentParser(prog="Passfilter.py",
formatter_class=argparse.RawDescriptionHelpFormatter,
description=desc)
group = parser.add_mutually_exclusive_group()
group.add_argument('-w', action='store_true', default=False,
help='Passwords must meet Windows complexity requirements.')
group.add_argument('-r', action='store', default='', metavar='string',
help='''String representing the character groups and count
required.''')
parser.add_argument('-m', action='store', type=int, default='3', metavar='min',
help='Minimum password length. (default: 3)')
parser.add_argument('-x', action='store', type=int, default='10', metavar='max',
help='Maximum password length. (default: 10)')
parser.add_argument('-s', action='store', default=''' !"#$%&'()*+,-./:;<=>?@[\]^_`{|}~''',
help='''Symbols allowed in the password.
(default: !"#$%%&'()*+,-./:;<=>?@[\]^_`{|}~)''',
metavar='symbols')
parser.add_argument('-f', metavar='wordlist',
help='Wordlist to parse (default: stdin).')
args = parser.parse_args()
# Open the file or stdin
if args.f:
try:
wordlist = open(args.f, 'r')
except IOError:
print "Could not open file %s" % args.f
sys.exit()
else:
wordlist = sys.stdin
for line in wordlist:
# Skip blank lines and comments in the word list
if re.match('^$', line):
continue
if re.match('^#.*$', line):
continue
# Strip the new line character and check the word for length requirements
word = line.rstrip('\r\n')
if len(word) < args.m:
continue
if len(word) > args.x:
continue
    # Count the occurrence of each type of character.
count = parse_word(word, args.s)
# If any character did not match the allowed characters, skip the word
if count['x'] > 0:
continue
# If requirements were provided then check to see if the word meets the
# requirements. If it does then keep it, if not, move to the next word.
if args.r:
if meets_requirements(count, parse_requirements(args.r)):
print word
continue
else:
continue
# If we require Windows complexity then check to see if the word meets the
# windows complexity requirements. If it does then keep it, if not, move to
# the next word.
if args.w:
if complex_pass(count):
print word
continue
else:
continue
else:
print word
if wordlist is not sys.stdin:
wordlist.close()
|
applications/tensorflow/sales_forecasting/test_main.py | payoto/graphcore_examples | 260 | 12637414 | # Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import os
from tempfile import TemporaryDirectory
import pytest
from examples_tests.test_util import SubProcessChecker
working_path = os.path.dirname(__file__)
class Test(SubProcessChecker):
""" Test the sales forecasting model """
@classmethod
def setUpClass(self):
super(Test, self).setUpClass()
@pytest.mark.ipus(1)
def test_sales_forecasting_one_ipu(self):
"""Test that the model runs on one IPU for one epoch."""
with TemporaryDirectory() as temp_dir:
self.run_command(
(
"python3 main.py --use-synthetic-data --epochs 1"
f" --mov-mean-window 0 --log-dir {temp_dir}"
),
working_path,
[
"Begin training loop", "Training:", r"epoch:\s+1",
"Validation:", "Best RMSPE|no valid RMSPE results"
]
)
@pytest.mark.ipus(2)
def test_sales_forecasting_two_ipus(self):
"""Test that the model runs when replicated over two IPUs for
one epoch."""
with TemporaryDirectory() as temp_dir:
self.run_command(
(
"python3 main.py --use-synthetic-data --epochs 1"
f" --mov-mean-window 0 --log-dir {temp_dir}"
" --replication-factor 2"
),
working_path,
[
"Begin training loop", "Training:", r"epoch:\s+1",
"Validation:", "Best RMSPE|no valid RMSPE results"
]
)
@pytest.mark.ipus(2)
def test_sales_forecasting_multiprocessing(self):
"""Test that the model runs with multiprocessing enabled."""
with TemporaryDirectory() as temp_dir:
self.run_command(
(
"python3 main.py --use-synthetic-data --epochs 1"
f" --mov-mean-window 0 --log-dir {temp_dir}"
" --multiprocessing"
),
working_path,
[
"Begin training loop", "Training:", r"epoch:\s+1",
"Validation:", "Best RMSPE|no valid RMSPE results"
]
)
|
python_toolbox/misc_tools/proxy_property.py | hboshnak/python_toolbox | 119 | 12637422 | # Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
import re
class ProxyProperty:
'''
Property that serves as a proxy to an attribute of the parent object.
When you create a `ProxyProperty`, you pass in the name of the attribute
(or nested attribute) that it should proxy. (Prefixed with a dot.) Then,
every time the property is `set`ed or `get`ed, the attribute is `set`ed or
`get`ed instead.
Example:
class Chair:
def __init__(self, whatever):
self.whatever = whatever
whatever_proxy = ProxyProperty('.whatever')
chair = Chair(3)
assert chair.whatever == chair.whatever_proxy == 3
chair.whatever_proxy = 4
assert chair.whatever == chair.whatever_proxy == 4
You may also refer to a nested attribute of the object rather than a direct
one; for example, you can do `ProxyProperty('.whatever.x.height')` and it
will access the `.height` attribute of the `.x` attribute of `.whatever`.
'''
def __init__(self, attribute_name, doc=None):
'''
Construct the `ProxyProperty`.
`attribute_name` is the name of the attribute that we will proxy,
prefixed with a dot, like '.whatever'.
You may also refer to a nested attribute of the object rather than a
direct one; for example, you can do
`ProxyProperty('.whatever.x.height')` and it will access the `.height`
attribute of the `.x` attribute of `.whatever`.
You may specify a docstring as `doc`.
'''
if not attribute_name.startswith('.'):
raise Exception(
f"The `attribute_name` must start with a dot to make it clear "
f"it's an attribute. {repr(attribute_name)} does not start "
f"with a dot."
)
self.getter = self.setter = None
exec(f'def getter(thing): return thing{attribute_name}')
exec(f'def setter(thing, value): thing{attribute_name} = value')
exec('self.getter, self.setter = getter, setter')
self.attribute_name = attribute_name[1:]
self.__doc__ = doc
def __get__(self, thing, our_type=None):
if thing is None:
# We're being accessed from the class itself, not from an object
return self
else:
return self.getter(thing)
def __set__(self, thing, value):
# todo: should I check if `thing` is `None` and set on class? Same for
# `__delete__`?
return self.setter(thing, value)
def __repr__(self):
return '<%s: %s%s>' % (
type(self).__name__,
repr(f'.{self.attribute_name}'),
f', doc={repr(self.__doc__)}' if self.__doc__ else ''
)
|
natlas-server/app/api/rescan_handler.py | purplesecops/natlas | 500 | 12637453 |
from flask import current_app
from app import db
def mark_scan_dispatched(rescan):
rescan.dispatchTask()
db.session.add(rescan)
db.session.commit()
current_app.ScopeManager.update_pending_rescans()
current_app.ScopeManager.update_dispatched_rescans()
return
def mark_scan_completed(ip, scan_id):
dispatched = current_app.ScopeManager.get_dispatched_rescans()
for scan in dispatched:
if scan.target == ip:
scan.completeTask(scan_id)
db.session.add(scan)
db.session.commit()
current_app.ScopeManager.update_dispatched_rescans()
return True
return False
|
data/dataset_jester.py | Hugo-cheng/ACTION-Net | 146 | 12637477 |
import os
import sys
import pickle
import numpy as np
import pandas as pd
import random
import torch
import pdb
from torch.utils.data import Dataset, DataLoader,RandomSampler
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image
import matplotlib.pyplot as plt
from tqdm import tqdm, trange
import random
import skimage.util as ski_util
from sklearn.utils import shuffle
from copy import copy
def load_video(annot_path, mode):
# mode: train, val, test
csv_file = os.path.join(annot_path, '{}.pkl'.format(mode))
annot_df = pd.read_pickle(csv_file)
rgb_samples = []
depth_samples = []
labels = []
for frame_i in range(annot_df.shape[0]):
rgb_list = annot_df['frame'].iloc[frame_i] # convert string in dataframe to list
rgb_samples.append(rgb_list)
labels.append(annot_df['label'].iloc[frame_i])
print('{}: {} videos have been loaded'.format(mode, len(rgb_samples)))
return rgb_samples, labels
class dataset_video(Dataset):
def __init__(self, root_path, mode, spatial_transform=None, temporal_transform=None):
self.root_path = root_path
self.rgb_samples, self.labels = load_video(root_path, mode)
self.sample_num = len(self.rgb_samples)
self.spatial_transform = spatial_transform
self.temporal_transform = temporal_transform
def __getitem__(self, idx):
rgb_name = self.rgb_samples[idx]
label = self.labels[idx]
indices = [i for i in range(len(rgb_name))]
selected_indice = self.temporal_transform(indices)
clip_frames = []
for i, frame_name_i in enumerate(selected_indice):
rgb_cache = Image.open(rgb_name[frame_name_i]).convert("RGB")
clip_frames.append(rgb_cache)
clip_frames = self.spatial_transform(clip_frames)
n, h, w = clip_frames.size()
return clip_frames.view(-1, 3, h, w), int(label)
def __len__(self):
return int(self.sample_num)
class dataset_video_inference(Dataset):
def __init__(self, root_path, mode, clip_num = 2, spatial_transform=None, temporal_transform=None):
self.root_path = root_path
self.clip_num = clip_num
self.video_samples, self.labels = load_video(root_path, mode)
self.mode = mode
self.sample_num = len(self.video_samples)
self.spatial_transform = spatial_transform
self.temporal_transform = temporal_transform
def __getitem__(self, idx):
rgb_name = self.video_samples[idx]
label = self.labels[idx]
indices = [i for i in range(len(rgb_name))]
video_clip = []
for win_i in range(self.clip_num):
clip_frames = []
selected_indice = self.temporal_transform(copy(indices))
for frame_name_i in selected_indice:
rgb_cache = Image.open(rgb_name[frame_name_i]).convert("RGB")
clip_frames.append(rgb_cache)
clip_frames = self.spatial_transform(clip_frames)
n, h, w = clip_frames.size()
video_clip.append(clip_frames.view(-1, 3, h, w))
video_clip = torch.stack(video_clip)
return video_clip, int(label)
def __len__(self):
return int(self.sample_num)
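# Minimal wiring sketch (added for illustration; not part of the original
# file). It shows how dataset_video could be plugged into a DataLoader. The
# annotation path and the two toy transforms are assumptions: the temporal
# transform is expected to subsample frame indices, and the spatial transform
# to turn a list of PIL images into a single stacked tensor.
def _dataset_usage_sketch(annot_path='./annotation_jester', clip_len=8):
    def toy_temporal_transform(indices):
        # evenly pick (up to) clip_len frame indices from those available
        step = max(len(indices) // clip_len, 1)
        return indices[::step][:clip_len]
    def toy_spatial_transform(pil_images):
        to_tensor = transforms.ToTensor()
        # stack frames along the channel dimension: (num_frames * 3, H, W)
        return torch.cat([to_tensor(img.resize((112, 112))) for img in pil_images], dim=0)
    train_set = dataset_video(annot_path, 'train',
                              spatial_transform=toy_spatial_transform,
                              temporal_transform=toy_temporal_transform)
    return DataLoader(train_set, batch_size=8, shuffle=True, num_workers=2)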
|
search-samples/recipe-app-website/main.py | Zhogolev/Languagerecognizer | 591 | 12637500 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jinja2
import json
import os
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def load_recipe(recipe_id):
filename = os.path.dirname(__file__) + '/recipes/' + recipe_id + '.json'
recipe = json.loads(open(filename, 'r').read())
recipe['id'] = recipe_id
return recipe
class MainPage(webapp2.RequestHandler):
def get(self):
template_values = {
'title': 'RecipeApp'
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class RecipePage(webapp2.RequestHandler):
def get(self, recipe_id):
query = self.request.get('q')
num_results = 0
if query:
num_results = 1
recipe = load_recipe(recipe_id)
ingredient_sections = ['']
ingredients_by_section = {'':[]}
for ingredient in recipe['ingredients']:
if 'category' in ingredient:
category = ingredient['category']
ingredient_section = []
if not category in ingredients_by_section:
ingredients_by_section[category] = ingredient_section
ingredient_sections.append(category)
else:
ingredient_section = ingredients_by_section[category]
ingredient_section.append(ingredient)
else:
ingredients_by_section[''].append(ingredient)
template_values = {
'title': recipe['title'],
'recipe': recipe,
'ingredients': ingredients_by_section,
'ingredient_sections': ingredient_sections,
'query': query,
'num_results': num_results
}
template = JINJA_ENVIRONMENT.get_template('recipe.html')
self.response.write(template.render(template_values))
class SearchResultsPage(webapp2.RequestHandler):
def get(self):
query = self.request.get('q')
results = []
clean_query = query.lower().strip()
if clean_query.endswith('recipes'):
clean_query = clean_query[:-7].strip()
for recipe_id in ['grilled-potato-salad', 'haloumi-salad', 'pierogi-poutine', 'wedge-salad', 'malaga-paella']:
recipe = load_recipe(recipe_id)
if recipe['title'].lower().find(clean_query) >= 0:
results.append(recipe)
if len(results) == 1:
self.redirect('/recipe/' + results[0]['id'] + '?q=' + query)
else:
template_values = {
'title': '"' + query + '" - RecipeApp',
'query': query,
'results': results,
'num_results': len(results)
}
template = JINJA_ENVIRONMENT.get_template('search.html')
self.response.write(template.render(template_values))
application = webapp2.WSGIApplication([
('/', MainPage),
(r'/recipe/(.+)', RecipePage),
(r'/search', SearchResultsPage)
], debug=True) |
pypi_server/handlers/api/users.py | jayvdb/pypi-server | 119 | 12637510 | # encoding: utf-8
import re
from tornado.web import HTTPError
from pypi_server.db.users import Users
from pypi_server.handlers import route
from pypi_server.handlers.base import threaded
from pypi_server.handlers.api import JSONHandler
from pypi_server.handlers.api.login import authorization_required
LOGIN_EXP = re.compile("^[\d\w\.\-\@\_]+$")
EMAIL_EXP = re.compile("^[^\@]+\@\S+$")
@route('/api/v1/users/?')
class UsersHandler(JSONHandler):
@authorization_required(is_admin=True)
@threaded
def get(self):
self.response(
list(
map(
lambda x: dict(
id=x.id,
login=x.login,
email=x.email,
is_admin=x.is_admin,
disabled=x.disabled,
),
Users.select(
Users.id,
Users.login,
Users.email,
Users.is_admin,
Users.disabled
)
)
)
)
@authorization_required(is_admin=True)
@threaded
def post(self):
try:
login = self.json["login"]
email = self.json["email"]
is_admin = bool(self.json.get("is_admin", 0))
password = self.json["password"]
assert password and len(password) > 3
assert LOGIN_EXP.match(login)
assert EMAIL_EXP.match(email)
except (KeyError, AssertionError, TypeError):
raise HTTPError(400)
if Users.select().where(Users.login == login).count():
raise HTTPError(409)
user = Users(
login=login,
email=email,
is_admin=is_admin,
password=password,
)
user.save()
self.response({
'id': user.id,
'login': user.login,
'email': user.email,
'is_admin': user.is_admin,
})
|
omnizart/transcribe_all.py | nicolasanjoran/omnizart | 1,145 | 12637517 | # import pretty_midi
# from omnizart.music import app as music_app
# from omnizart.drum import app as drum_app
# from omnizart.chord import app as chord_app
def process(input_audio, model_path=None, output="./"):
pass
|
1.Cnn_Captcha/create_image/cteate_image.py | duyuankai1992/tensorflow-1 | 927 | 12637535 | <gh_stars>100-1000
#coding:utf-8
from captcha.image import ImageCaptcha # pip install captcha
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import random,time
# Characters allowed in the captcha; Chinese characters are not used here
number = ['0','1','2','3','4','5','6','7','8','9']
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
ALPHABET = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
# Captchas generally ignore case; the captcha is 4 characters long
def random_captcha_text(char_set=number+alphabet+ALPHABET, captcha_size=4):
captcha_text = []
for i in range(captcha_size):
c = random.choice(char_set)
captcha_text.append(c)
return captcha_text
# Generate the captcha image for a random text string
def gen_captcha_text_and_image():
image = ImageCaptcha()
captcha_text = random_captcha_text()
captcha_text = ''.join(captcha_text)
captcha = image.generate(captcha_text)
    image.write(captcha_text, captcha_text + '.jpg')  # write the captcha image to a file
captcha_image = Image.open(captcha)
captcha_image = np.array(captcha_image)
return captcha_text, captcha_image
if __name__ == '__main__':
    # Test
while(1):
text, image = gen_captcha_text_and_image()
print 'begin ',time.ctime(),type(image)
f = plt.figure()
ax = f.add_subplot(111)
ax.text(0.1, 0.9,text, ha='center', va='center', transform=ax.transAxes)
plt.imshow(image)
#plt.show()
print 'end ',time.ctime()
|
mindinsight/debugger/__init__.py | mindspore-ai/mindinsight | 216 | 12637605 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Debugger Introduction.
This module provides Python APIs to retrieve the debugger info. The APIs can
help users understand the training process and find bugs in training
scripts.
"""
from mindinsight.debugger.api.conditions import \
Watchpoint, WatchpointHit, TensorTooLargeCondition, TensorUnchangedCondition, TensorAllZeroCondition, \
TensorOverflowCondition, OperatorOverflowCondition, TensorRangeCondition, TensorTooSmallCondition, \
TensorChangeBelowThresholdCondition, TensorChangeAboveThresholdCondition, ConditionBase
from mindinsight.debugger.api.debugger_tensor import DebuggerTensor
from mindinsight.debugger.api.dump_analyzer import DumpAnalyzer
from mindinsight.debugger.api.node import Node
__all__ = ["DumpAnalyzer", "Node", "DebuggerTensor", "Watchpoint",
"WatchpointHit",
"TensorTooLargeCondition",
"TensorTooSmallCondition",
"TensorRangeCondition",
"TensorOverflowCondition",
"OperatorOverflowCondition",
"TensorAllZeroCondition",
"TensorUnchangedCondition",
"TensorChangeBelowThresholdCondition",
"TensorChangeAboveThresholdCondition",
"ConditionBase"
]
|
integration/common/core.py | meldafrawi/longhorn-engine | 160 | 12637610 | import fcntl
import struct
import os
import grpc
import tempfile
import time
import random
import subprocess
import string
import threading
import pytest
from rpc.controller.controller_client import ControllerClient
import common.cmd as cmd
from common.util import read_file, checksum_data
from common.frontend import blockdev, get_block_device_path
from common.constants import (
LONGHORN_BINARY, LONGHORN_UPGRADE_BINARY, LONGHORN_DEV_DIR,
VOLUME_NAME, VOLUME_BACKING_NAME,
SIZE, PAGE_SIZE, SIZE_STR,
BACKUP_DIR, BACKING_FILE_RAW,
FRONTEND_TGT_BLOCKDEV,
RETRY_COUNTS, RETRY_INTERVAL, RETRY_COUNTS2,
RETRY_COUNTS_SHORT, RETRY_INTERVAL_SHORT,
PROC_STATE_STARTING, PROC_STATE_RUNNING,
PROC_STATE_ERROR,
ENGINE_NAME, EXPANDED_SIZE_STR,
VOLUME_NO_FRONTEND_NAME,
FIXED_REPLICA_PATH1, FIXED_REPLICA_PATH2,
)
thread_failed = False
def _file(f):
return os.path.join(_base(), '../../{}'.format(f))
def _base():
return os.path.dirname(__file__)
def cleanup_process(pm_client):
for name in pm_client.process_list():
try:
pm_client.process_delete(name)
except grpc.RpcError as e:
if 'cannot find process' not in e.details():
raise e
for i in range(RETRY_COUNTS):
ps = pm_client.process_list()
if len(ps) == 0:
break
time.sleep(RETRY_INTERVAL)
ps = pm_client.process_list()
assert len(ps) == 0
return pm_client
def wait_for_process_running(client, name):
healthy = False
for i in range(RETRY_COUNTS):
state = client.process_get(name).status.state
if state == PROC_STATE_RUNNING:
healthy = True
break
elif state != PROC_STATE_STARTING:
# invalid state
assert False
time.sleep(RETRY_INTERVAL)
assert healthy
def wait_for_process_error(client, name):
verified = False
for i in range(RETRY_COUNTS):
state = client.process_get(name).status.state
if state == PROC_STATE_ERROR:
verified = True
break
time.sleep(RETRY_INTERVAL)
assert verified
def create_replica_process(client, name, replica_dir="",
args=[], binary=LONGHORN_BINARY,
size=SIZE, port_count=15,
port_args=["--listen,localhost:"],
disable_revision_counter=False):
if not replica_dir:
replica_dir = tempfile.mkdtemp()
if not args:
args = ["replica", replica_dir, "--size", str(size)]
if disable_revision_counter == True:
args += ["--disableRevCounter"]
client.process_create(
name=name, binary=binary, args=args,
port_count=port_count, port_args=port_args)
wait_for_process_running(client, name)
return client.process_get(name)
def create_engine_process(client, name=ENGINE_NAME,
volume_name=VOLUME_NAME,
binary=LONGHORN_BINARY,
listen="", listen_ip="localhost",
size=SIZE, frontend=FRONTEND_TGT_BLOCKDEV,
replicas=[], backends=["file"],
disable_revision_counter=False):
args = ["controller", volume_name]
if frontend != "":
args += ["--frontend", frontend]
if disable_revision_counter == True:
args += ["--disableRevCounter"]
for r in replicas:
args += ["--replica", r]
for b in backends:
args += ["--enable-backend", b]
client.process_create(
name=name, binary=binary, args=args,
port_count=1, port_args=["--listen,localhost:"])
wait_for_process_running(client, name)
return client.process_get(name)
def get_process_address(p):
return "localhost:" + str(p.status.port_start)
def cleanup_controller(grpc_client):
try:
v = grpc_client.volume_get()
except grpc.RpcError as grpc_err:
if "Socket closed" not in grpc_err.details() and \
"failed to connect to all addresses" not in grpc_err.details():
raise grpc_err
return grpc_client
if v.replicaCount != 0:
grpc_client.volume_shutdown()
for r in grpc_client.replica_list():
grpc_client.replica_delete(r.address)
return grpc_client
# TODO: https://github.com/longhorn/longhorn/issues/1857
# For some cases, we can not use get_replica to add the retry,
# Because the grpc_client.replica_get() will error out.
def get_replica_client_with_delay(grpc_client):
time.sleep(3)
return grpc_client
# TODO: https://github.com/longhorn/longhorn/issues/1857
def get_replica(grpc_client):
retry_cnt = 3
while retry_cnt != 0:
try:
r = grpc_client.replica_get()
except grpc.RpcError as grpc_err:
if "Socket closed" not in grpc_err.details():
raise(grpc_err)
print("wait for sometime, and try again")
time.sleep(1)
retry_cnt -= 1
else:
break
if retry_cnt == 0:
print("Failed to run grpc_client with e", grpc_err)
raise(grpc_err)
return r
def cleanup_replica(grpc_client):
r = get_replica(grpc_client)
if r.state == 'initial':
return grpc_client
if r.state == 'closed':
grpc_client.replica_open()
grpc_client.replica_delete()
r = grpc_client.replica_reload()
assert r.state == 'initial'
return grpc_client
# TODO: https://github.com/longhorn/longhorn/issues/1857
def get_controller_version_detail(grpc_controller_client):
retry_cnt = 3
while retry_cnt != 0:
try:
c = grpc_controller_client.version_detail_get()
except grpc.RpcError as grpc_err:
if "Socket closed" not in grpc_err.details():
raise(grpc_err)
print("wait for sometime, and try again")
time.sleep(1)
retry_cnt -= 1
else:
break
if retry_cnt == 0:
print("Failed to run grpc_client with e", grpc_err)
raise(grpc_err)
def random_str():
return 'random-{0}-{1}'.format(random_num(), int(time.time()))
def random_num():
return random.randint(0, 1000000)
def create_backend_file():
name = random_str()
fo = open(name, "w+")
fo.truncate(SIZE)
fo.close()
return os.path.abspath(name)
def cleanup_backend_file(paths):
for path in paths:
if os.path.exists(path):
os.remove(path)
def get_dev_path(name):
return os.path.join(LONGHORN_DEV_DIR, name)
def get_expansion_snapshot_name():
return 'expand-{0}'.format(EXPANDED_SIZE_STR)
def get_replica_paths_from_snapshot_name(snap_name):
replica_paths = []
cmd = ["find", "/tmp", "-name",
'*volume-snap-{0}.img'.format(snap_name)]
snap_paths = subprocess.check_output(cmd).split()
assert snap_paths
for p in snap_paths:
replica_paths.append(os.path.dirname(p.decode('utf-8')))
return replica_paths
def get_snapshot_file_paths(replica_path, snap_name):
return os.path.join(replica_path,
'volume-snap-{0}.img'.format(snap_name))
def get_replica_head_file_path(replica_dir):
cmd = ["find", replica_dir, "-name",
'*volume-head-*.img']
return subprocess.check_output(cmd).strip()
def wait_for_rebuild_complete(url):
completed = 0
rebuild_status = {}
for x in range(RETRY_COUNTS):
completed = 0
rebuild_status = cmd.replica_rebuild_status(url)
for rebuild in rebuild_status.values():
if rebuild['state'] == "complete":
assert rebuild['progress'] == 100
assert not rebuild['isRebuilding']
completed += 1
elif rebuild['state'] == "":
assert not rebuild['isRebuilding']
completed += 1
# Right now add-replica/rebuild is a blocking call.
# Hence the state won't become `in_progress` when
# we check the rebuild status.
elif rebuild['state'] == "in_progress":
assert rebuild['state'] == "in_progress"
assert rebuild['isRebuilding']
else:
assert rebuild['state'] == "error"
assert rebuild['error'] != ""
assert not rebuild['isRebuilding']
if completed == len(rebuild_status):
break
time.sleep(RETRY_INTERVAL)
return completed == len(rebuild_status)
def wait_for_purge_completion(url):
completed = 0
purge_status = {}
for x in range(RETRY_COUNTS):
completed = 0
purge_status = cmd.snapshot_purge_status(url)
for status in purge_status.values():
assert status['progress'] <= 100
assert 'isPurging' in status.keys()
if not status['isPurging']:
assert status['progress'] == 100
completed += 1
assert 'error' in status.keys()
assert status['error'] == ''
if completed == len(purge_status):
break
time.sleep(RETRY_INTERVAL)
assert completed == len(purge_status)
def wait_for_restore_completion(url, backup_url):
completed = 0
rs = {}
for x in range(RETRY_COUNTS):
completed = 0
rs = cmd.restore_status(url)
for status in rs.values():
assert 'state' in status.keys()
if status['backupURL'] != backup_url:
break
if status['state'] == "complete":
assert 'progress' in status.keys()
assert status['progress'] == 100
completed += 1
elif status['state'] == "error":
assert 'error' in status.keys()
assert status['error'] == ""
else:
assert status['state'] == "in_progress"
if completed == len(rs):
break
time.sleep(RETRY_INTERVAL)
assert completed == len(rs)
def restore_with_frontend(url, engine_name, backup):
client = ControllerClient(url)
client.volume_frontend_shutdown()
cmd.backup_restore(url, backup)
wait_for_restore_completion(url, backup)
client.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)
v = client.volume_get()
assert v.frontendState == "up"
return
def verify_no_frontend_data(data_offset, data, grpc_c):
grpc_c.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)
v = grpc_c.volume_get()
assert v.frontendState == "up"
dev = get_blockdev(volume=VOLUME_NO_FRONTEND_NAME)
verify_read(dev, data_offset, data)
grpc_c.volume_frontend_shutdown()
v = grpc_c.volume_get()
assert v.frontendState == "down"
def start_no_frontend_volume(grpc_c, *grpc_r_list):
assert len(grpc_r_list) > 0
grpc_c.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)
for grpc_r in grpc_r_list:
open_replica(grpc_r)
v = grpc_c.volume_start(
replicas=[grpc_r.url for grpc_r in grpc_r_list])
assert v.replicaCount == len(grpc_r_list)
dr_replicas = grpc_c.replica_list()
assert len(dr_replicas) == len(grpc_r_list)
grpc_c.volume_frontend_shutdown()
v = grpc_c.volume_get()
assert v.frontendState == "down"
def cleanup_no_frontend_volume(grpc_c, *grpc_r_list):
grpc_c.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)
v = grpc_c.volume_get()
assert v.frontendState == "up"
cmd.sync_agent_server_reset(grpc_c.address)
grpc_c.volume_frontend_shutdown()
v = grpc_c.volume_get()
assert v.frontendState == "down"
cleanup_controller(grpc_c)
for grpc_r in grpc_r_list:
cleanup_replica(grpc_r)
cleanup_replica_dir(FIXED_REPLICA_PATH1)
cleanup_replica_dir(FIXED_REPLICA_PATH2)
def reset_volume(grpc_c, *grpc_r_list):
complete = True
for i in range(RETRY_COUNTS_SHORT):
complete = True
cmd.sync_agent_server_reset(grpc_c.address)
cleanup_controller(grpc_c)
for grpc_r in grpc_r_list:
cleanup_replica(grpc_r)
open_replica(grpc_r)
# TODO: A simple workaround of race condition.
# See https://github.com/longhorn/longhorn/issues/1628 for details.
time.sleep(1)
v = grpc_c.volume_start(
replicas=[grpc_r.url for grpc_r in grpc_r_list])
rs = grpc_c.replica_list()
if len(rs) != len(grpc_r_list):
complete = False
else:
for r_info in rs:
if r_info.mode != 'RW':
complete = False
break
if complete:
break
time.sleep(RETRY_INTERVAL)
assert complete
return v
def create_backup(url, snap, backup_target, volume_size=SIZE_STR,
backing_image_name="", backing_image_checksum="",
backup_name=""):
backup = cmd.backup_create(url, snap, backup_target,
[], backing_image_name, backing_image_checksum,
backup_name)
backup_info = cmd.backup_inspect(url, backup)
assert backup_info["URL"] == backup
assert backup_info["VolumeSize"] == volume_size
if backing_image_name != "":
assert backup_info["VolumeBackingImageName"] == backing_image_name
assert snap in backup_info["SnapshotName"]
return backup_info
def rm_backups(url, engine_name, backups):
for b in backups:
cmd.backup_rm(url, b)
with pytest.raises(subprocess.CalledProcessError):
restore_with_frontend(url, engine_name, b)
with pytest.raises(subprocess.CalledProcessError):
cmd.backup_inspect(url, b)
# Engine frontend is down, Start it up
client = ControllerClient(url)
client.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)
def rm_snaps(url, snaps):
for s in snaps:
cmd.snapshot_rm(url, s)
cmd.snapshot_purge(url)
wait_for_purge_completion(url)
snap_info_list = cmd.snapshot_info(url)
for s in snaps:
assert s not in snap_info_list
def snapshot_revert_with_frontend(url, engine_name, name):
client = ControllerClient(url)
client.volume_frontend_shutdown()
cmd.snapshot_revert(url, name)
client.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)
def cleanup_replica_dir(dir=""):
if dir and os.path.exists(dir):
try:
cmd = ['rm', '-r', dir + "*"]
subprocess.check_call(cmd)
except Exception:
pass
def open_replica(grpc_client, backing_file=None):
r = grpc_client.replica_get()
assert r.state == 'initial'
assert r.size == '0'
assert r.sector_size == 0
assert r.parent == ''
assert r.head == ''
r = grpc_client.replica_create(size=str(1024 * 4096))
assert r.state == 'closed'
assert r.size == str(1024 * 4096)
assert r.sector_size == 512
assert r.parent == ''
assert r.head == 'volume-head-000.img'
return r
def get_blockdev(volume):
dev = blockdev(volume)
for i in range(10):
if not dev.ready():
time.sleep(1)
assert dev.ready()
return dev
def write_dev(dev, offset, data):
return dev.writeat(offset, data)
def read_dev(dev, offset, length):
return dev.readat(offset, length)
def random_string(length):
return \
''.join(random.choice(string.ascii_lowercase) for x in range(length))
def verify_data(dev, offset, data):
write_dev(dev, offset, data)
readed = read_dev(dev, offset, len(data))
assert data == readed
def prepare_backup_dir(backup_dir):
if os.path.exists(backup_dir):
subprocess.check_call(["rm", "-rf", backup_dir])
os.makedirs(backup_dir)
assert os.path.exists(backup_dir)
def get_backup_volume_url(backup_target, volume_name):
return backup_target + "?volume=" + volume_name
def read_from_backing_file(offset, length):
p = _file(BACKING_FILE_RAW)
return read_file(p, offset, length)
def checksum_dev(dev):
return checksum_data(dev.readat(0, SIZE).encode('utf-8'))
def data_verifier(dev, times, offset, length):
try:
verify_loop(dev, times, offset, length)
except Exception as ex:
global thread_failed
thread_failed = True
raise ex
def verify_loop(dev, times, offset, length):
for i in range(times):
data = random_string(length)
verify_data(dev, offset, data)
def verify_replica_state(grpc_c, addr, state):
if not addr.startswith("tcp://"):
addr = "tcp://" + addr
verified = False
for i in range(RETRY_COUNTS_SHORT):
replicas = grpc_c.replica_list()
assert len(replicas) == 2
for r in replicas:
if r.address == addr and r.mode == state:
verified = True
break
if verified:
break
time.sleep(RETRY_INTERVAL_SHORT)
assert verified
def verify_replica_mode(grpc_c, addr, mode):
if not addr.startswith("tcp://"):
addr = "tcp://" + addr
verified = False
for i in range(RETRY_COUNTS_SHORT):
replicas = grpc_c.replica_list()
snapList = cmd.snapshot_ls(grpc_c.address)
for r in replicas:
if r.address == addr and r.mode == mode:
verified = True
break
if verified:
break
time.sleep(RETRY_INTERVAL_SHORT)
assert verified
def verify_read(dev, offset, data):
for i in range(10):
readed = read_dev(dev, offset, len(data))
assert data == readed
def verify_async(dev, times, length, count):
assert length * count < SIZE
threads = []
for i in range(count):
t = threading.Thread(target=data_verifier,
args=(dev, times, i * PAGE_SIZE, length))
t.start()
threads.append(t)
for i in range(count):
threads[i].join()
global thread_failed
if thread_failed:
thread_failed = False
raise Exception("data_verifier thread failed")
def get_dev(grpc_replica1, grpc_replica2, grpc_controller,
clean_backup_dir=True):
if clean_backup_dir:
prepare_backup_dir(BACKUP_DIR)
v = reset_volume(grpc_controller, grpc_replica1, grpc_replica2)
return get_blockdev(v.name)
def random_offset(size, existings={}):
assert size < PAGE_SIZE
for i in range(RETRY_COUNTS):
offset = 0
if int(SIZE) != size:
offset = random.randrange(0, int(SIZE) - size, PAGE_SIZE)
collided = False
# it's [start, end) vs [pos, pos + size)
for start, end in existings.items():
if offset + size <= start or offset >= end:
continue
collided = True
break
if not collided:
break
assert not collided
existings[offset] = offset + size
return offset
def random_length(length_limit):
return random.randint(1, length_limit - 1)
class Data:
def __init__(self, offset, length, content):
self.offset = offset
self.length = length
self.content = content
def write_and_verify_data(self, dev):
verify_data(dev, self.offset, self.content)
def read_and_verify_data(self, dev):
assert read_dev(dev, self.offset, self.length) == self.content
def read_and_refute_data(self, dev):
assert read_dev(dev, self.offset, self.length) != self.content
class Snapshot:
def __init__(self, dev, data, controller_addr):
self.dev = dev
self.data = data
self.controller_addr = controller_addr
self.data.write_and_verify_data(self.dev)
self.checksum = checksum_dev(self.dev)
self.name = cmd.snapshot_create(controller_addr)
# verify the whole disk is at the state when snapshot was taken
def verify_checksum(self):
assert checksum_dev(self.dev) == self.checksum
def verify_data(self):
self.data.read_and_verify_data(self.dev)
def refute_data(self):
self.data.read_and_refute_data(self.dev)
def generate_random_data(dev, existings={}, length_limit=PAGE_SIZE):
length = random_length(length_limit)
return Data(random_offset(length, existings),
length,
random_string(length))
def expand_volume_with_frontend(grpc_controller_client, size): # NOQA
grpc_controller_client.volume_frontend_shutdown()
grpc_controller_client.volume_expand(size)
wait_for_volume_expansion(grpc_controller_client, size)
grpc_controller_client.volume_frontend_start(FRONTEND_TGT_BLOCKDEV)
def wait_for_volume_expansion(grpc_controller_client, size): # NOQA
for i in range(RETRY_COUNTS):
volume = grpc_controller_client.volume_get()
if not volume.isExpanding and volume.size == size:
break
time.sleep(RETRY_INTERVAL)
assert not volume.isExpanding
assert volume.size == size
return volume
def check_block_device_size(volume_name, size):
device_path = get_block_device_path(volume_name)
# BLKGETSIZE64, result is bytes as unsigned 64-bit integer (uint64)
req = 0x80081272
buf = ' ' * 8
with open(device_path) as dev:
buf = fcntl.ioctl(dev.fileno(), req, buf)
device_size = struct.unpack('L', buf)[0]
assert device_size == size
def wait_and_check_volume_expansion(grpc_controller_client, size):
v = wait_for_volume_expansion(grpc_controller_client, size)
check_block_device_size(v.name, size)
def delete_process(client, name):
try:
client.process_delete(name)
except grpc.RpcError as e:
if 'cannot find process' not in e.details():
raise e
def wait_for_process_deletion(client, name):
deleted = False
for i in range(RETRY_COUNTS):
rs = client.process_list()
if name not in rs:
deleted = True
break
time.sleep(RETRY_INTERVAL)
assert deleted
def check_dev_existence(volume_name):
found = False
for i in range(RETRY_COUNTS):
if os.path.exists(get_dev_path(volume_name)):
found = True
break
time.sleep(RETRY_INTERVAL)
assert found
def wait_for_dev_deletion(volume_name):
found = True
for i in range(RETRY_COUNTS):
if not os.path.exists(get_dev_path(volume_name)):
found = False
break
time.sleep(RETRY_INTERVAL)
assert not found
def upgrade_engine(client, binary, engine_name, volume_name, replicas):
args = ["controller", volume_name, "--frontend", FRONTEND_TGT_BLOCKDEV,
"--upgrade"]
for r in replicas:
args += ["--replica", r]
return client.process_replace(
engine_name, binary, args,
)
|
desktop/core/ext-py/py4j-0.9/src/py4j/finalizer.py | kokosing/hue | 5,079 | 12637615 | # -*- coding: UTF-8 -*-
"""
Module that defines Finalizer classes responsible for registering and
cleaning up finalizers.
Created on Mar 7, 2010
:author: <NAME>
"""
from __future__ import unicode_literals, absolute_import
from threading import RLock
from py4j.compat import items
class ThreadSafeFinalizer(object):
"""A `ThreadSafeFinalizer` is a global class used to register weak
reference finalizers (i.e., a weak reference with a callback).
This class is useful when one wants to register a finalizer of an object
with circular references. The finalizer of an object with circular
references might never be called if the object's finalizer is kept by the
same object.
For example, if object A refers to B and B refers to A, A should not keep a
weak reference to itself.
`ThreadSafeFinalizer` is thread-safe and uses reentrant lock on each
operation."""
finalizers = {}
lock = RLock()
@classmethod
def add_finalizer(cls, id, weak_ref):
"""Registers a finalizer with an id.
:param id: The id of the object referenced by the weak reference.
:param weak_ref: The weak reference to register.
"""
with cls.lock:
cls.finalizers[id] = weak_ref
@classmethod
def remove_finalizer(cls, id):
"""Removes a finalizer associated with this id.
:param id: The id of the object for which the finalizer will be
deleted.
"""
with cls.lock:
cls.finalizers.pop(id, None)
@classmethod
def clear_finalizers(cls, clear_all=False):
"""Removes all registered finalizers.
:param clear_all: If `True`, all finalizers are deleted. Otherwise,
only the finalizers from an empty weak reference are deleted
(i.e., weak references pointing to inexistent objects).
"""
with cls.lock:
if clear_all:
cls.finalizers.clear()
else:
for id, ref in items(cls.finalizers):
if ref() is None:
cls.finalizers.pop(id, None)
class Finalizer(object):
"""A `Finalizer` is a global class used to register weak reference finalizers
(i.e., a weak reference with a callback).
This class is useful when one wants to register a finalizer of an object
with circular references. The finalizer of an object with circular
references might never be called if the object's finalizer is kept by the
same object.
For example, if object A refers to B and B refers to A, A should not keep a
weak reference to itself.
`Finalizer` is not thread-safe and should only be used by single-threaded
programs."""
finalizers = {}
@classmethod
def add_finalizer(cls, id, weak_ref):
"""Registers a finalizer with an id.
:param id: The id of the object referenced by the weak reference.
:param weak_ref: The weak reference to register.
"""
cls.finalizers[id] = weak_ref
@classmethod
def remove_finalizer(cls, id):
"""Removes a finalizer associated with this id.
:param id: The id of the object for which the finalizer will be
deleted.
"""
cls.finalizers.pop(id, None)
@classmethod
def clear_finalizers(cls, clear_all=False):
"""Removes all registered finalizers.
:param clear_all: If `True`, all finalizers are deleted. Otherwise,
only the finalizers from an empty weak reference are deleted (i.e.,
weak references pointing to inexistent objects).
"""
if clear_all:
cls.finalizers.clear()
else:
for id, ref in items(cls.finalizers):
if ref() is None:
cls.finalizers.pop(id, None)
def clear_finalizers(clear_all=False):
"""Removes all registered finalizers in :class:`ThreadSafeFinalizer` and
:class:`Finalizer`.
:param clear_all: If `True`, all finalizers are deleted. Otherwise, only
the finalizers from an empty weak reference are deleted (i.e., weak
references pointing to inexistent objects).
"""
ThreadSafeFinalizer.clear_finalizers(clear_all)
Finalizer.clear_finalizers(clear_all)
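# Illustrative usage sketch (added for clarity; not part of the original py4j
# module). It shows how a finalizer for an object holding a circular reference
# could be registered through ThreadSafeFinalizer, as described in the class
# docstring above. The Node class and on_collect callback are hypothetical
# examples, not py4j APIs.
def _thread_safe_finalizer_usage_sketch():
    import weakref
    class Node(object):
        def __init__(self):
            # the object refers to itself, so it must not keep its own
            # weak-reference finalizer alive
            self.self_ref = self
    collected = []
    def on_collect(ref):
        # invoked by the interpreter once the Node instance is reclaimed
        collected.append(ref)
    node = Node()
    # register the weak reference (with callback) under the object's id
    ThreadSafeFinalizer.add_finalizer(id(node), weakref.ref(node, on_collect))
    del node
    # later, drop weak references whose targets no longer exist
    ThreadSafeFinalizer.clear_finalizers()
    return collected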
|
test/regexp/python11.py | kylebarron/MagicPython | 1,482 | 12637637 | <filename>test/regexp/python11.py<gh_stars>1000+
a = r"""
(?x) # multi-XXXline XXX regexp
foo (?# comm TODOent TODO)
foo # type: int
foo (?# type: int)
"""
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
r : source.python, storage.type.string.python, string.regexp.quoted.multi.python
""" : punctuation.definition.string.begin.python, source.python, string.regexp.quoted.multi.python
: source.python, string.regexp.quoted.multi.python
(?x) : source.python, storage.modifier.flag.regexp, string.regexp.quoted.multi.python
: source.python, string.regexp.quoted.multi.python
# : comment.line.number-sign.python, punctuation.definition.comment.python, source.python, string.regexp.quoted.multi.python
multi-XXXline : comment.line.number-sign.python, source.python, string.regexp.quoted.multi.python
XXX : comment.line.number-sign.python, keyword.codetag.notation.python, source.python, string.regexp.quoted.multi.python
regexp : comment.line.number-sign.python, source.python, string.regexp.quoted.multi.python
foo : source.python, string.regexp.quoted.multi.python
(?# : comment.regexp, punctuation.comment.begin.regexp, source.python, string.regexp.quoted.multi.python
comm TODOent : comment.regexp, source.python, string.regexp.quoted.multi.python
TODO : comment.regexp, keyword.codetag.notation.python, source.python, string.regexp.quoted.multi.python
) : comment.regexp, punctuation.comment.end.regexp, source.python, string.regexp.quoted.multi.python
foo : source.python, string.regexp.quoted.multi.python
# : comment.line.number-sign.python, punctuation.definition.comment.python, source.python, string.regexp.quoted.multi.python
type: int : comment.line.number-sign.python, source.python, string.regexp.quoted.multi.python
foo : source.python, string.regexp.quoted.multi.python
(?# : comment.regexp, punctuation.comment.begin.regexp, source.python, string.regexp.quoted.multi.python
type: int : comment.regexp, source.python, string.regexp.quoted.multi.python
) : comment.regexp, punctuation.comment.end.regexp, source.python, string.regexp.quoted.multi.python
""" : punctuation.definition.string.end.python, source.python, string.regexp.quoted.multi.python
|
research/object_detection/utils/test_case.py | gujralsanyam22/models | 82,518 | 12637642 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A convenience wrapper around tf.test.TestCase to test with TPU, TF1, TF2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import zip
import tensorflow.compat.v1 as tf
from tensorflow.python import tf2 # pylint: disable=import-outside-toplevel
from object_detection.utils import tf_version
if not tf2.enabled():
from tensorflow.contrib import tpu as contrib_tpu # pylint: disable=g-import-not-at-top, line-too-long
flags = tf.app.flags
flags.DEFINE_bool('tpu_test', False, 'Deprecated Flag.')
FLAGS = flags.FLAGS
class TestCase(tf.test.TestCase):
"""Base Test class to handle execution under {TF1.X, TF2.X} x {TPU, CPU}.
This class determines the TF version and availability of TPUs to set up
tests appropriately.
"""
def maybe_extract_single_output(self, outputs):
if isinstance(outputs, list) or isinstance(outputs, tuple):
if isinstance(outputs[0], tf.Tensor):
outputs_np = [output.numpy() for output in outputs]
else:
outputs_np = outputs
if len(outputs_np) == 1:
return outputs_np[0]
else:
return outputs_np
else:
if isinstance(outputs, tf.Tensor):
return outputs.numpy()
else:
return outputs
def has_tpu(self):
"""Returns whether there are any logical TPU devices."""
return bool(tf.config.experimental.list_logical_devices(device_type='TPU'))
def is_tf2(self):
"""Returns whether TF2 is enabled."""
return tf_version.is_tf2()
def execute_tpu_tf1(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on TPU with Tensorflow 1.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single numpy array.
"""
with self.session(graph=(graph or tf.Graph())) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
def wrap_graph_fn(*args, **kwargs):
results = compute_fn(*args, **kwargs)
if (not (isinstance(results, dict) or isinstance(results, tf.Tensor))
and hasattr(results, '__iter__')):
results = list(results)
return results
tpu_computation = contrib_tpu.rewrite(wrap_graph_fn, placeholders)
sess.run(contrib_tpu.initialize_system())
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(tpu_computation,
feed_dict=dict(zip(placeholders, inputs)))
sess.run(contrib_tpu.shutdown_system())
return self.maybe_extract_single_output(materialized_results)
def execute_tpu_tf2(self, compute_fn, inputs):
"""Executes compute_fn on TPU with Tensorflow 2.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single numpy array.
"""
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
tf.config.experimental_connect_to_cluster(resolver)
topology = tf.tpu.experimental.initialize_tpu_system(resolver)
device_assignment = tf.tpu.experimental.DeviceAssignment.build(
topology, num_replicas=1)
strategy = tf.distribute.experimental.TPUStrategy(
resolver, device_assignment=device_assignment)
@tf.function
def run():
tf_inputs = [tf.constant(input_t) for input_t in inputs]
return strategy.run(compute_fn, args=tf_inputs)
outputs = run()
tf.tpu.experimental.shutdown_tpu_system()
return self.maybe_extract_single_output(outputs)
def execute_cpu_tf1(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on CPU with Tensorflow 1.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single numpy array.
"""
if self.is_tf2():
      raise ValueError('Required version TensorFlow 1.X is not available.')
with self.session(graph=(graph or tf.Graph())) as sess:
placeholders = [tf.placeholder_with_default(v, v.shape) for v in inputs]
results = compute_fn(*placeholders)
if (not (isinstance(results, dict) or isinstance(results, tf.Tensor)) and
hasattr(results, '__iter__')):
results = list(results)
sess.run([tf.global_variables_initializer(), tf.tables_initializer(),
tf.local_variables_initializer()])
materialized_results = sess.run(results, feed_dict=dict(zip(placeholders,
inputs)))
return self.maybe_extract_single_output(materialized_results)
def execute_cpu_tf2(self, compute_fn, inputs):
"""Executes compute_fn on CPU with Tensorflow 2.X.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single numpy array.
"""
if not self.is_tf2():
raise ValueError('Required version TensorFlow 2.0 is not available.')
@tf.function
def run():
tf_inputs = [tf.constant(input_t) for input_t in inputs]
return compute_fn(*tf_inputs)
return self.maybe_extract_single_output(run())
def execute_cpu(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on CPU.
Depending on the underlying TensorFlow installation (build deps) runs in
either TF 1.X or TF 2.X style.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single tensor.
"""
if self.is_tf2():
return self.execute_cpu_tf2(compute_fn, inputs)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
def execute_tpu(self, compute_fn, inputs, graph=None):
"""Executes compute_fn on TPU.
Depending on the underlying TensorFlow installation (build deps) runs in
either TF 1.X or TF 2.X style.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single tensor.
"""
if not self.has_tpu():
raise ValueError('No TPU Device found.')
if self.is_tf2():
return self.execute_tpu_tf2(compute_fn, inputs)
else:
return self.execute_tpu_tf1(compute_fn, inputs, graph)
def execute_tf2(self, compute_fn, inputs):
"""Runs compute_fn with TensorFlow 2.0.
Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
Returns:
A list of numpy arrays or a single tensor.
"""
if not self.is_tf2():
raise ValueError('Required version TensorFlow 2.0 is not available.')
if self.has_tpu():
return self.execute_tpu_tf2(compute_fn, inputs)
else:
return self.execute_cpu_tf2(compute_fn, inputs)
def execute_tf1(self, compute_fn, inputs, graph=None):
"""Runs compute_fn with TensorFlow 1.X.
Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single tensor.
"""
if self.is_tf2():
      raise ValueError('Required version TensorFlow 1.X is not available.')
if self.has_tpu():
return self.execute_tpu_tf1(compute_fn, inputs, graph)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
def execute(self, compute_fn, inputs, graph=None):
"""Runs compute_fn with inputs and returns results.
* Executes in either TF1.X or TF2.X style based on the TensorFlow version.
* Executes on TPU if available, otherwise executes on CPU.
Args:
compute_fn: a function containing Tensorflow computation that takes a list
of input numpy tensors, performs computation and returns output numpy
tensors.
inputs: a list of numpy arrays to feed input to the `compute_fn`.
graph: (optional) If not None, provided `graph` is used for computation
instead of a brand new tf.Graph().
Returns:
A list of numpy arrays or a single tensor.
"""
if self.has_tpu() and tf2.enabled():
return self.execute_tpu_tf2(compute_fn, inputs)
elif not self.has_tpu() and tf2.enabled():
return self.execute_cpu_tf2(compute_fn, inputs)
elif self.has_tpu() and not tf2.enabled():
return self.execute_tpu_tf1(compute_fn, inputs, graph)
else:
return self.execute_cpu_tf1(compute_fn, inputs, graph)
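# Illustrative sketch (added for clarity; not part of the original module): a
# concrete test built on TestCase only supplies a compute_fn and numpy inputs,
# and `execute` dispatches to TPU/CPU and TF1.X/TF2.X automatically. The
# add-one computation below is an arbitrary example, not an existing test.
class _AddOneExampleTest(TestCase):
  def test_add_one(self):
    import numpy as np  # local import to keep the sketch self-contained
    def graph_fn(inputs):
      # runs as a TF graph (TF1.X) or tf.function (TF2.X) on the chosen device
      return inputs + 1.0
    inputs = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    outputs = self.execute(graph_fn, [inputs])
    self.assertAllClose(outputs, [2.0, 3.0, 4.0])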
|
finite_ntk/finite_ntk/lazy/fvp_second_order.py | forgi86/xfer | 244 | 12637648 | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import torch
from gpytorch.lazy import LazyTensor
from .utils import flatten, unflatten_like
class FVP_AG(LazyTensor):
def __init__(self, model, data, **kwargs):
r"""
FVP_AG is a class representing a Fisher matrix of a model on a set of data, given
that the probability model for the data is
p(y | model(data)) = Categorical(model(data)). Rather than
forming the entire Fisher information matrix, we compute it with matrix vector products
using second order autograd (hence the AG name).
model: model class
        data: data that the Fisher information is to be calculated on
epsilon: hyper-parameter
"""
super(FVP_AG, self).__init__(data)
self.model = model
self.data = data
        # compute the number of parameters
self.num_params = 0
for p in self.model.parameters():
self.num_params += p.numel()
def _size(self, val=None):
if val == 0 or val == 1:
return self.num_params
else:
return (self.num_params, self.num_params)
def _transpose_nonbatch(self):
return self
# A loss whose 2nd derivative is the Fisher information matrix
    # Thanks to <NAME> for deriving this loss fn.
def detached_entropy(self, logits, y=None):
# -1*\frac{1}{m}\sum_{i,k} [f_k(x_i)] \log f_k(x_i), where [] is detach
log_probs = torch.nn.LogSoftmax(dim=1)(logits)
probs = torch.nn.Softmax(dim=1)(logits)
return -1 * (probs.detach() * log_probs).sum(1).mean(0)
def _matmul(self, rhs):
orig_dtype = rhs.dtype
rhs = rhs.float()
vec = rhs.t() # transpose
# check if all norms are zeros and return a zero matrix otherwise
if torch.norm(vec, dim=0).eq(0.0).all():
return torch.zeros(
self.num_params, rhs.size(1), device=rhs.device, dtype=rhs.dtype
)
# form list of all vectors
with torch.autograd.no_grad():
vec_list = []
for v in vec:
vec_list.append(unflatten_like(v, self.model.parameters()))
with torch.autograd.enable_grad():
# compute batch loss with detached entropy
batch_loss = self.detached_entropy(self.model(self.data))
# first gradient wrt parameters
grad_bl_list = torch.autograd.grad(
batch_loss, self.model.parameters(), create_graph=True, only_inputs=True
)
res = []
for vec_sublist in vec_list:
deriv = 0
for vec_part, grad_part in zip(vec_sublist, grad_bl_list):
deriv += torch.sum(vec_part.detach().double() * grad_part.double())
# fast implicit hvp product
hvp_list = torch.autograd.grad(
deriv.float(),
self.model.parameters(),
only_inputs=True,
retain_graph=True,
)
res.append(flatten(hvp_list))
res_matrix = torch.stack(res).detach()
return res_matrix.t().type(orig_dtype)
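# Minimal usage sketch (added for illustration; not part of the original
# module), assuming a small softmax classifier and random data. It shows how a
# batch of Fisher-vector products could be requested from FVP_AG; the layer
# sizes, batch shape and number of probe vectors are arbitrary assumptions.
def _fvp_usage_sketch():
    model = torch.nn.Sequential(
        torch.nn.Linear(10, 32), torch.nn.ReLU(), torch.nn.Linear(32, 4)
    )
    data = torch.randn(16, 10)  # a batch of 16 inputs with 10 features
    fisher = FVP_AG(model, data)
    num_params = sum(p.numel() for p in model.parameters())
    # each column of rhs is one vector to be multiplied by the Fisher matrix
    rhs = torch.randn(num_params, 3)
    return fisher._matmul(rhs)  # shape: (num_params, 3)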
|
recipes/Python/578594_Send_Messages__millions_facebook_Users_/recipe-578594.py | tdiprima/code | 2,023 | 12637658 | <reponame>tdiprima/code
#usr/bin/env/python
"""
This script retrieves public user data from facebook.com via the Graph API
and sends each discovered user a short message. It is written to aid a
better understanding of Python.
Modules required: BeautifulSoup
Author:<NAME> aka Cybercam
Blog:http://pythonnotesbyajay.blogspot.in/
"""
import smtplib
import email
from email.MIMEMultipart import MIMEMultipart
from email.parser import Parser
from email.MIMEText import MIMEText
import urllib2
from BeautifulSoup import BeautifulSoup
import random
user_name_array=[]
def get_fb_username(id):
try:
url=urllib2.urlopen('https://graph.facebook.com/'+str(id)).read()
soup = BeautifulSoup(url)
all_attr=soup.prettify()
print all_attr
gend=all_attr.find("gender")
if(all_attr[gend+9] == 'm'):
gender='male'
elif (all_attr[gend+9] == 'f'):
gender = 'female'
else:
gender="The user didn't specify any gender"
if all_attr.find('username') != -1:
start_quote=all_attr.find('username')+10
end_quote=all_attr.find('"',start_quote+1)
user_name=all_attr[start_quote:end_quote+1].strip('"')+'@facebook.com'
user_name_array.append(user_name)
print "username ==>"+'\t'+user_name +'\t'+ "gender ==>"+"\t"+gender
print "\n"
except urllib2.HTTPError:
pass
for i in range(4,10,1):
#for i in range(startvalue,stopvalue,stepvalue):
get_fb_username(i+1)
print user_name_array
def send_mail():
random_text=["hi","hello","Nice to meet you","How are you","wassup","hi!!!",'just wanted to say hi']
server = smtplib.SMTP()
server.connect('smtp.gmail.com', 587) # for eg. host = 'smtp.gmail.com', port = 587
server.ehlo()
server.starttls()
server.login('<EMAIL>', 'password')
#replace this with u r gmail id
#password ==> <PASSWORD>
fromaddr ='<EMAIL>'
for i in range(len(user_name_array)-1):
msg = email.MIMEMultipart.MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = user_name_array[i]
msg['Subject'] = 'hi'
msg.attach(MIMEText(random_text[random.randint(0,len(random_text)-1)]))
#msg.attach(MIMEText('put some custom message.', 'plain'))
server.sendmail(fromaddr,user_name_array[i],msg.as_string())
server.quit()
send_mail()
|
testing/slides/examples/fixtures/test_fixtures.py | ramosmaria/school2021 | 252 | 12637662 | import pytest
@pytest.fixture(scope='session')
def some_data():
return [1, 2, 3]
def test_using_fixture(some_data):
assert len(some_data) == 3
def test_also_using_fixture(some_data):
assert some_data[0] == 1
|
mode/examples/Topics/Fractals and L-Systems/PenroseTile/l_system.py | timgates42/processing.py | 1,224 | 12637687 | <filename>mode/examples/Topics/Fractals and L-Systems/PenroseTile/l_system.py
class LSystem(object):
def __init__(self):
self.steps = 0
self.axiom = "F"
self.rule = "F+F-F"
self.startLength = 190.0
self.theta = radians(120.0)
self.reset()
def reset(self):
self.production = self.axiom
self.drawLength = self.startLength
self.generations = 0
def getAge(self):
return self.generations
def render(self):
translate(width / 2, height / 2)
self.steps += 5
        if self.steps > len(self.production):
            self.steps = len(self.production)
        for i in range(self.steps):
            step = self.production[i]
if step == 'F':
rect(0, 0, -self.drawLength, -self.drawLength)
noFill()
translate(0, -self.drawLength)
elif step == '+':
rotate(self.theta)
elif step == '-':
rotate(-self.theta)
elif step == '[':
pushMatrix()
elif step == ']':
popMatrix()
def simulate(self, gen):
while self.getAge() < gen:
self.production = self.iterate(self.production, self.rule)
def iterate(self, prod_, rule_):
self.drawLength = self.drawLength * 0.6
self.generations += 1
newProduction = prod_
        newProduction = newProduction.replace("F", rule_)
return newProduction
|
nucleus/examples/print_tfrecord.py | gaybro8777/nucleus | 721 | 12637697 | <gh_stars>100-1000
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prints a TFRecord file created by Nucleus.
Usage:
print_tfrecord <filename> <proto_name>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl import app
from google.protobuf import text_format
from nucleus.io import genomics_reader
from nucleus.protos import bed_pb2
from nucleus.protos import bedgraph_pb2
from nucleus.protos import fasta_pb2
from nucleus.protos import fastq_pb2
from nucleus.protos import gff_pb2
from nucleus.protos import reads_pb2
from nucleus.protos import variants_pb2
# pylint: disable=g-direct-tensorflow-import
from nucleus.protos import example_pb2
PROTO_DB = {
'BedGraphRecord': bedgraph_pb2.BedGraphRecord,
'BedRecord': bed_pb2.BedRecord,
'FastaRecord': fasta_pb2.FastaRecord,
'FastqRecord': fastq_pb2.FastqRecord,
'GffRecord': gff_pb2.GffRecord,
'Read': reads_pb2.Read,
'Variant': variants_pb2.Variant,
'Example': example_pb2.Example
}
def main(argv):
if len(argv) != 3:
print('Usage: {} <filename> <proto_name>\n'.format(argv[0]))
sys.exit(-1)
filename = argv[1]
proto_name = argv[2]
if proto_name not in PROTO_DB:
print('Unknown protocol buffer name {}\n'.format(proto_name))
print('Known names are: {}\n'.format(' '.join(PROTO_DB.keys())))
sys.exit(-1)
proto = PROTO_DB[proto_name]
with genomics_reader.TFRecordReader(filename, proto=proto) as reader:
for record in reader:
print(text_format.MessageToString(record))
if __name__ == '__main__':
app.run(main)
|
cupy_alias/sparse/dia.py | fixstars/clpy | 142 | 12637698 | <gh_stars>100-1000
from clpy.sparse.dia import * # NOQA
|
cpmpy/among.py | hakank/hakank | 279 | 12637714 | <gh_stars>100-1000
"""
Global constraint among in cpmpy.
'''
Requires exactly m variables in x to take one of the values in v.
'''
Model created by <NAME>, <EMAIL>
See also my cpmpy page: http://www.hakank.org/cpmpy/
"""
import sys
import numpy as np
from cpmpy import *
from cpmpy.solvers import *
from cpmpy_hakank import *
def among_test():
n = 5 # length of x
m = 3 # number of values
v = [1,5,8]
# variables
x = intvar(1,8,shape=n,name="x")
# constraints
model = Model(among(m, x,v))
ortools_wrapper2(model,[x])
among_test()
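# A minimal decomposition sketch (added for illustration): under the semantics
# stated in the module docstring -- exactly m variables in x take one of the
# values in v -- the among(m, x, v) constraint can be written directly in
# cpmpy as a sum of equalities. The variable bounds below mirror among_test().
def among_decomposition_sketch():
    n = 5
    m = 3
    v = [1, 5, 8]
    x = intvar(1, 8, shape=n, name="x")
    # for each variable, count whether it takes one of the values in v,
    # then require the total count to be exactly m
    model = Model(sum(sum(x[i] == vv for vv in v) for i in range(n)) == m)
    return model.solve()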
|
tests/test_cookbook.py | craiga/pyexcel | 1,045 | 12637720 | import os
import pyexcel as pe
from base import clean_up_files
from nose.tools import eq_, raises
class TestSpliting:
def setUp(self):
self.testfile4 = "multiple_sheets.xls"
self.content4 = {
"Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
"Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]],
"Sheet3": [[u"X", u"Y", u"Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]],
}
pe.save_book_as(dest_file_name=self.testfile4, bookdict=self.content4)
def test_split_a_book(self):
pe.cookbook.split_a_book(self.testfile4, "extracted.csv")
assert os.path.exists("Sheet1_extracted.csv")
assert os.path.exists("Sheet2_extracted.csv")
assert os.path.exists("Sheet3_extracted.csv")
def test_split_a_book_2(self):
"""use default output file name"""
pe.cookbook.split_a_book(self.testfile4)
assert os.path.exists("Sheet1_%s" % self.testfile4)
assert os.path.exists("Sheet2_%s" % self.testfile4)
assert os.path.exists("Sheet3_%s" % self.testfile4)
def test_extract_a_book(self):
pe.cookbook.extract_a_sheet_from_a_book(
self.testfile4, "Sheet1", "extracted.csv"
)
assert os.path.exists("Sheet1_extracted.csv")
def test_extract_a_book_2(self):
"""Use default output file name"""
pe.cookbook.extract_a_sheet_from_a_book(self.testfile4, "Sheet1")
assert os.path.exists("Sheet1_%s" % self.testfile4)
def tearDown(self):
file_list = [
self.testfile4,
"Sheet1_extracted.csv",
"Sheet2_extracted.csv",
"Sheet3_extracted.csv",
"Sheet1_multiple_sheets.xls",
"Sheet2_multiple_sheets.xls",
"Sheet3_multiple_sheets.xls",
]
clean_up_files(file_list)
class TestCookbook:
def setUp(self):
"""
Make a test csv file as:
1,1,1,1
2,2,2,2
3,3,3,3
"""
self.testfile = "test1.xls"
self.content = {
"X": [1, 2, 3, 4, 5],
"Y": [6, 7, 8, 9, 10],
"Z": [11, 12, 13, 14, 15],
}
pe.save_as(dest_file_name=self.testfile, adict=self.content)
self.testfile2 = "test.csv"
self.content2 = {
"O": [1, 2, 3, 4, 5],
"P": [6, 7, 8, 9, 10],
"Q": [11, 12, 13, 14, 15],
}
pe.save_as(dest_file_name=self.testfile2, adict=self.content2)
self.testfile3 = "test.xls"
self.content3 = {
"R": [1, 2, 3, 4, 5],
"S": [6, 7, 8, 9, 10],
"T": [11, 12, 13, 14, 15],
}
pe.save_as(dest_file_name=self.testfile3, adict=self.content3)
self.testfile4 = "multiple_sheets.xls"
self.content4 = {
"Sheet1": [[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
"Sheet2": [[4, 4, 4, 4], [5, 5, 5, 5], [6, 6, 6, 6]],
"Sheet3": [[u"X", u"Y", u"Z"], [1, 4, 7], [2, 5, 8], [3, 6, 9]],
}
pe.save_book_as(dest_file_name=self.testfile4, bookdict=self.content4)
@raises(ValueError)
def test_update_columns(self):
bad_column = {"A": [31, 1, 1, 1, 1]}
# try non-existent column first
pe.cookbook.update_columns(self.testfile, bad_column)
@raises(NotImplementedError)
def test_update_columns2(self):
custom_column = {"Z": [33, 44, 55, 66, 77]}
pe.cookbook.update_columns(self.testfile, custom_column)
r = pe.SeriesReader("pyexcel_%s" % self.testfile)
data = r.dict
assert data["Z"] == custom_column["Z"]
pe.cookbook.update_columns(self.testfile, custom_column, "test4.xls")
r = pe.SeriesReader("test4.xls")
data = r.dict
assert data["Z"] == custom_column["Z"]
# test if it try not overwrite a file
pe.cookbook.update_columns(self.testfile, custom_column) # bang
def test_update_rows(self):
bad_column = {100: [31, 1, 1, 1, 1]}
custom_column = {"1": [3, 4]}
try:
# try non-existent column first
pe.cookbook.update_rows(self.testfile, bad_column)
assert 1 == 2
except ValueError:
assert 1 == 1
pe.cookbook.update_rows(self.testfile, custom_column)
r = pe.Reader("pyexcel_%s" % self.testfile)
assert custom_column["1"] == r.row_at(1)[1:]
try:
# try not to overwrite a file
pe.cookbook.update_rows(self.testfile, custom_column)
r = pe.SeriesReader("pyexcel_%s" % self.testfile)
assert 1 == 2
except NotImplementedError:
assert 1 == 1
pe.cookbook.update_rows(self.testfile, custom_column, "test4.xls")
r = pe.Reader("test4.xls")
assert custom_column["1"] == r.row_at(1)[1:]
@raises(NotImplementedError)
def test_merge_two_files(self):
pe.cookbook.merge_two_files(self.testfile, self.testfile2)
r = pe.SeriesReader("pyexcel_merged.csv")
r.format(int)
content = {}
content.update(self.content)
content.update(self.content2)
eq_(r.dict, content)
pe.cookbook.merge_two_files(self.testfile, self.testfile2) # bang
@raises(NotImplementedError)
def test_merge_files(self):
file_array = [self.testfile, self.testfile2, self.testfile3]
pe.cookbook.merge_files(file_array)
r = pe.SeriesReader("pyexcel_merged.csv")
r.format(int)
content = {}
content.update(self.content)
content.update(self.content2)
content.update(self.content3)
eq_(r.dict, content)
pe.cookbook.merge_files(file_array) # bang, do not overwrite
@raises(NotImplementedError)
def test_merge_two_readers(self):
r1 = pe.SeriesReader(self.testfile)
r2 = pe.SeriesReader(self.testfile2)
pe.cookbook.merge_two_readers(r1, r2)
r = pe.SeriesReader("pyexcel_merged.csv")
r.format(int)
content = {}
content.update(self.content)
content.update(self.content2)
eq_(r.dict, content)
pe.cookbook.merge_two_readers(r1, r2) # bang, do not overwrite
@raises(NotImplementedError)
def test_merge_readers(self):
r1 = pe.SeriesReader(self.testfile)
r2 = pe.SeriesReader(self.testfile2)
r3 = pe.SeriesReader(self.testfile3)
file_array = [r1, r2, r3]
pe.cookbook.merge_readers(file_array)
r = pe.SeriesReader("pyexcel_merged.csv")
r.format(int)
content = {}
content.update(self.content)
content.update(self.content2)
content.update(self.content3)
eq_(r.dict, content)
pe.cookbook.merge_readers(file_array) # bang, do not overwrite
def test_merge_two_row_filter_hat_readers(self):
r1 = pe.SeriesReader(self.testfile)
r2 = pe.SeriesReader(self.testfile2)
pe.cookbook.merge_two_readers(r1, r2)
r = pe.SeriesReader("pyexcel_merged.csv")
r.format(int)
content = {}
content.update(self.content)
content.update(self.content2)
eq_(r.dict, content)
def test_merge_any_files_to_a_book(self):
file_array = [
self.testfile,
self.testfile2,
self.testfile3,
self.testfile4,
]
pe.cookbook.merge_all_to_a_book(file_array, "merged.xlsx")
r = pe.BookReader("merged.xlsx")
r[self.testfile].name_columns_by_row(0)
content = r[self.testfile].to_dict()
assert content == self.content
r[self.testfile2].format(int)
r[self.testfile2].name_columns_by_row(0)
content2 = r[self.testfile2].to_dict()
assert content2 == self.content2
r[self.testfile3].name_columns_by_row(0)
content3 = r[self.testfile3].to_dict()
assert content3 == self.content3
content4 = r["Sheet1"].to_array()
assert content4 == self.content4["Sheet1"]
content5 = r["Sheet2"].to_array()
assert content5 == self.content4["Sheet2"]
content6 = r["Sheet3"].to_array()
assert content6 == self.content4["Sheet3"]
def test_merge_csv_files_to_a_book(self):
file_array = [self.testfile, self.testfile2, self.testfile3]
pe.cookbook.merge_csv_to_a_book(file_array, "merged.xlsx")
r = pe.BookReader("merged.xlsx")
r[self.testfile].name_columns_by_row(0)
content = r[self.testfile].to_dict()
assert content == self.content
r[self.testfile2].format(int)
r[self.testfile2].name_columns_by_row(0)
content2 = r[self.testfile2].to_dict()
assert content2 == self.content2
r[self.testfile3].name_columns_by_row(0)
content3 = r[self.testfile3].to_dict()
assert content3 == self.content3
def tearDown(self):
file_list = [
self.testfile,
self.testfile2,
self.testfile3,
self.testfile4,
"pyexcel_%s" % self.testfile,
"pyexcel_merged.csv",
"merged.xlsx",
"merged.xls",
"test4.xls",
]
clean_up_files(file_list)
|
jax-stubs/setup.py | deepmind/tensor_annotations | 117 | 12637721 | #!/usr/bin/env python
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Install script for JAX stubs."""
import os
import pathlib
import shutil
import tempfile
import setuptools
# Note: Copybara takes care of moving files to 'jax-stubs/'.
setuptools.setup(
name='tensor-annotations-jax-stubs',
version='1.0.0',
description='Shape-aware type stubs for JAX.',
long_description='Shape-aware types stubs for JAX. See the `tensor-annotations` package.',
long_description_content_type='text/markdown',
url='https://github.com/deepmind/tensor_annotations',
packages=['jax-stubs'],
package_data={'jax-stubs': ['*.pyi', '*/*.pyi']},
install_requires=['tensor-annotations'],
)
|
applications/DEMApplication/python_scripts/processes/apply_forces_and_moments_process.py | lkusch/Kratos | 778 | 12637723 | import KratosMultiphysics
# Import applications
import KratosMultiphysics.DEMApplication as DEM
# Other imports
def Factory(settings, Model):
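    """Standard Kratos factory hook: validate the json settings, fill in defaults and build the ApplyForcesAndMomentsProcess."""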
    if not isinstance(settings, KratosMultiphysics.Parameters):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
process_settings = settings["Parameters"]
folder_settings = KratosMultiphysics.Parameters("""{
"help" : "This process applies loads over the particles in a certain submodelpart, for a certain time interval",
"mesh_id" : 0,
"model_part_name" : "please_specify_model_part_name",
"force_settings" : {
"value" : [10.0, "3*t", "x+y"],
"table" : [0, 0, 0]
},
"moment_settings" : {
"value" : [10.0, "3*t", "x+y"],
"table" : [0, 0, 0]
},
"interval" : [0.0, 1e30]
}""" )
process_settings.AddMissingParameters(folder_settings)
if process_settings.Has("model_part_name"):
computing_model_part = Model[process_settings["model_part_name"].GetString()]
else: # using default name
computing_model_part = Model["DEM"]
process_settings.RemoveValue("help")
return DEM.ApplyForcesAndMomentsProcess(computing_model_part, process_settings)
|
Python-3/basic_examples/strings/string_encode_decode.py | ghiloufibelgacem/jornaldev | 1,139 | 12637728 | str_original = 'Hello'
bytes_encoded = str_original.encode(encoding='utf-8')
print(type(bytes_encoded))
str_decoded = bytes_encoded.decode()
print(type(str_decoded))
print('Encoded bytes =', bytes_encoded)
print('Decoded String =', str_decoded)
print('str_original equals str_decoded =', str_original == str_decoded)
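# same round trip with user-supplied input; encode()/decode() default to UTF-8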
str_original = input('Please enter string data:\n')
bytes_encoded = str_original.encode()
str_decoded = bytes_encoded.decode()
print('Encoded bytes =', bytes_encoded)
print('Decoded String =', str_decoded)
print('str_original equals str_decoded =', str_original == str_decoded) |
decode.py | nyu-dl/dl4mt-nonauto | 117 | 12637737 | import copy
import ipdb
import math
import os
import torch
import numpy as np
import time
from torch.nn import functional as F
from torch.autograd import Variable
from tqdm import tqdm, trange
from model import Transformer, FastTransformer, INF, TINY, softmax
from data import NormalField, NormalTranslationDataset, TripleTranslationDataset, ParallelDataset, data_path
from utils import Metrics, Best, computeBLEU, computeBLEUMSCOCO, Batch, masked_sort, computeGroupBLEU, organise_trg_len_dic, make_decoder_masks, \
double_source_masks, remove_repeats, remove_repeats_tensor, print_bleu, oracle_converged, equality_converged, jaccard_converged
from time import gmtime, strftime
from multiset import Multiset
tokenizer = lambda x: x.replace('@@ ', '').split()
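# run_fast_transformer: iterative-refinement decoding. Each pass re-runs the decoder on its
# own previous prediction; when args.adaptive_decoding is set, a per-sentence criterion
# (oracle BLEU against the reference, exact equality of consecutive outputs, or Jaccard
# overlap of token multisets) marks sentences as converged so they can stop early.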
def run_fast_transformer(decoder_inputs, decoder_masks,\
sources, source_masks,\
targets,\
encoding,\
model, args, use_argmax=True):
trg_unidx = model.output_decoding( ('trg', targets) )
batch_size, src_len, hsize = encoding[0].size()
all_decodings = []
all_probs = []
iter_ = 0
bleu_hist = [ [] for xx in range(batch_size) ]
output_hist = [ [] for xx in range(batch_size) ]
multiset_hist = [ [] for xx in range(batch_size) ]
num_iters = [ 0 for xx in range(batch_size) ]
done_ = [False for xx in range(batch_size)]
final_decoding = [ None for xx in range(batch_size) ]
while True:
curr_iter = min(iter_, args.num_decs-1)
next_iter = min(iter_+1, args.num_decs-1)
decoding, out, probs = model(encoding, source_masks, decoder_inputs, decoder_masks,
decoding=True, return_probs=True, iter_=curr_iter)
dec_output = decoding.data.cpu().numpy().tolist()
"""
if args.trg_len_option != "reference":
decoder_masks = 0. * decoder_masks
for bidx in range(batch_size):
try:
decoder_masks[bidx,:(dec_output[bidx].index(3))+1] = 1.
except:
decoder_masks[bidx,:] = 1.
"""
if args.adaptive_decoding == "oracle":
out_unidx = model.output_decoding( ('trg', decoding ) )
sentence_bleus = computeBLEU(out_unidx, trg_unidx, corpus=False, tokenizer=tokenizer)
for bidx in range(batch_size):
output_hist[bidx].append( dec_output[bidx] )
bleu_hist[bidx].append(sentence_bleus[bidx])
converged = oracle_converged( bleu_hist, num_items=args.adaptive_window )
for bidx in range(batch_size):
if not done_[bidx] and converged[bidx] and num_iters[bidx] == 0:
num_iters[bidx] = iter_ + 1 - (args.adaptive_window -1)
done_[bidx] = True
final_decoding[bidx] = output_hist[bidx][-args.adaptive_window]
elif args.adaptive_decoding == "equality":
for bidx in range(batch_size):
#if 3 in dec_output[bidx]:
# dec_output[bidx] = dec_output[bidx][:dec_output[bidx].index(3)]
output_hist[bidx].append( dec_output[bidx] )
converged = equality_converged( output_hist, num_items=args.adaptive_window )
for bidx in range(batch_size):
if not done_[bidx] and converged[bidx] and num_iters[bidx] == 0:
num_iters[bidx] = iter_ + 1
done_[bidx] = True
final_decoding[bidx] = output_hist[bidx][-1]
elif args.adaptive_decoding == "jaccard":
for bidx in range(batch_size):
#if 3 in dec_output[bidx]:
# dec_output[bidx] = dec_output[bidx][:dec_output[bidx].index(3)]
output_hist[bidx].append( dec_output[bidx] )
multiset_hist[bidx].append( Multiset(dec_output[bidx]) )
converged = jaccard_converged( multiset_hist, num_items=args.adaptive_window )
for bidx in range(batch_size):
if not done_[bidx] and converged[bidx] and num_iters[bidx] == 0:
num_iters[bidx] = iter_ + 1
done_[bidx] = True
final_decoding[bidx] = output_hist[bidx][-1]
all_decodings.append( decoding )
all_probs.append(probs)
decoder_inputs = 0
if args.next_dec_input in ["both", "emb"]:
if use_argmax:
_, argmax = torch.max(probs, dim=-1)
else:
probs_sz = probs.size()
probs_ = Variable(probs.data, requires_grad=False)
argmax = torch.multinomial(probs_.contiguous().view(-1, probs_sz[-1]), 1).view(*probs_sz[:-1])
emb = F.embedding(argmax, model.decoder[next_iter].out.weight * math.sqrt(args.d_model))
decoder_inputs += emb
if args.next_dec_input in ["both", "out"]:
decoder_inputs += out
iter_ += 1
if iter_ == args.valid_repeat_dec or (False not in done_):
break
    if args.adaptive_decoding is not None:
        for bidx in range(batch_size):
            if num_iters[bidx] == 0:
                num_iters[bidx] = 20
            if final_decoding[bidx] is None:
if args.adaptive_decoding == "oracle":
final_decoding[bidx] = output_hist[bidx][np.argmax(bleu_hist[bidx])]
else:
final_decoding[bidx] = output_hist[bidx][-1]
decoding = Variable(torch.LongTensor(np.array(final_decoding)))
if decoder_masks.is_cuda:
decoding = decoding.cuda()
return decoding, all_decodings, num_iters, all_probs
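# decode_model: decode an evaluation set with either the autoregressive Transformer (beam
# search) or the FastTransformer (iterative refinement), optionally writing source/target/
# decoded text to files and reporting BLEU and decoding-speed statistics.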
def decode_model(args, model, dev, evaluate=True, trg_len_dic=None,
decoding_path=None, names=None, maxsteps=None):
args.logger.info("decoding, f_size={}, beam_size={}, alpha={}".format(args.f_size, args.beam_size, args.alpha))
dev.train = False # make iterator volatile=True
if not args.no_tqdm:
progressbar = tqdm(total=200, desc='start decoding')
model.eval()
if not args.debug:
decoding_path.mkdir(parents=True, exist_ok=True)
handles = [(decoding_path / name ).open('w') for name in names]
corpus_size = 0
src_outputs, trg_outputs, dec_outputs, timings = [], [], [], []
all_decs = [ [] for idx in range(args.valid_repeat_dec)]
decoded_words, target_words, decoded_info = 0, 0, 0
attentions = None
decoder = model.decoder[0] if args.model is FastTransformer else model.decoder
pad_id = decoder.field.vocab.stoi['<pad>']
eos_id = decoder.field.vocab.stoi['<eos>']
curr_time = 0
cum_sentences = 0
cum_tokens = 0
cum_images = 0 # used for mscoco
num_iters_total = []
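    # main loop: prepare each batch, decode it, untokenize the outputs and accumulate
    # sources, targets and decodings together with per-batch timings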
for iters, dev_batch in enumerate(dev):
start_t = time.time()
if args.dataset != "mscoco":
decoder_inputs, decoder_masks,\
targets, target_masks,\
sources, source_masks,\
encoding, batch_size, rest = model.quick_prepare(dev_batch, fast=(type(model) is FastTransformer), trg_len_option=args.trg_len_option, trg_len_ratio=args.trg_len_ratio, trg_len_dic=trg_len_dic, bp=args.bp)
else:
# only use first caption for calculating log likelihood
all_captions = dev_batch[1]
dev_batch[1] = dev_batch[1][0]
decoder_inputs, decoder_masks,\
targets, target_masks,\
_, source_masks,\
encoding, batch_size, rest = model.quick_prepare_mscoco(dev_batch, all_captions=all_captions, fast=(type(model) is FastTransformer), inputs_dec=args.inputs_dec, trg_len_option=args.trg_len_option, max_len=args.max_len, trg_len_dic=trg_len_dic, bp=args.bp, gpu=args.gpu>-1)
sources = None
cum_sentences += batch_size
batch_size, src_len, hsize = encoding[0].size()
# for now
if type(model) is Transformer:
all_decodings = []
decoding = model(encoding, source_masks, decoder_inputs, decoder_masks,
beam=args.beam_size, alpha=args.alpha, \
decoding=True, feedback=attentions)
all_decodings.append( decoding )
num_iters = [0]
elif type(model) is FastTransformer:
decoding, all_decodings, num_iters, argmax_all_probs = run_fast_transformer(decoder_inputs, decoder_masks, \
sources, source_masks, targets, encoding, model, args, use_argmax=True)
num_iters_total.extend( num_iters )
if not args.use_argmax:
for _ in range(args.num_samples):
                    _, _, _, sampled_all_probs = run_fast_transformer(decoder_inputs, decoder_masks, \
                            sources, source_masks, targets, encoding, model, args, use_argmax=False)
for iter_ in range(args.valid_repeat_dec):
argmax_all_probs[iter_] = argmax_all_probs[iter_] + sampled_all_probs[iter_]
all_decodings = []
for iter_ in range(args.valid_repeat_dec):
argmax_all_probs[iter_] = argmax_all_probs[iter_] / args.num_samples
all_decodings.append(torch.max(argmax_all_probs[iter_], dim=-1)[-1])
decoding = all_decodings[-1]
used_t = time.time() - start_t
curr_time += used_t
if args.dataset != "mscoco":
if args.remove_repeats:
outputs_unidx = [model.output_decoding(d) for d in [('src', sources), ('trg', targets), ('trg', remove_repeats_tensor(decoding))]]
else:
outputs_unidx = [model.output_decoding(d) for d in [('src', sources), ('trg', targets), ('trg', decoding)]]
else:
# make sure that 5 captions per each example
num_captions = len(all_captions[0])
for c in range(1, len(all_captions)):
assert (num_captions == len(all_captions[c]))
# untokenize reference captions
for n_ref in range(len(all_captions)):
n_caps = len(all_captions[0])
for c in range(n_caps):
all_captions[n_ref][c] = all_captions[n_ref][c].replace("@@ ","")
outputs_unidx = [ list(map(list, zip(*all_captions))) ]
if args.remove_repeats:
all_dec_outputs = [model.output_decoding(d) for d in [('trg', remove_repeats_tensor(all_decodings[ii])) for ii in range(len(all_decodings))]]
else:
all_dec_outputs = [model.output_decoding(d) for d in [('trg', all_decodings[ii]) for ii in range(len(all_decodings))]]
corpus_size += batch_size
if args.dataset != "mscoco":
cum_tokens += sum([len(xx.split(" ")) for xx in outputs_unidx[0]]) # NOTE source tokens, not target
if args.dataset != "mscoco":
src_outputs += outputs_unidx[0]
trg_outputs += outputs_unidx[1]
if args.remove_repeats:
dec_outputs += remove_repeats(outputs_unidx[-1])
else:
dec_outputs += outputs_unidx[-1]
else:
trg_outputs += outputs_unidx[0]
for idx, each_output in enumerate(all_dec_outputs):
if args.remove_repeats:
all_decs[idx] += remove_repeats(each_output)
else:
all_decs[idx] += each_output
#if True:
if False and decoding_path is not None:
for sent_i in range(len(outputs_unidx[0])):
if args.dataset != "mscoco":
print ('SRC')
print (outputs_unidx[0][sent_i])
for ii in range(len(all_decodings)):
print ('DEC iter {}'.format(ii))
print (all_dec_outputs[ii][sent_i])
print ('TRG')
print (outputs_unidx[1][sent_i])
else:
print ('TRG')
trg = outputs_unidx[0]
for subsent_i in range(len(trg[sent_i])):
print ('TRG {}'.format(subsent_i))
print (trg[sent_i][subsent_i])
for ii in range(len(all_decodings)):
print ('DEC iter {}'.format(ii))
print (all_dec_outputs[ii][sent_i])
print ('---------------------------')
timings += [used_t]
if not args.debug:
for s, t, d in zip(outputs_unidx[0], outputs_unidx[1], outputs_unidx[2]):
s, t, d = s.replace('@@ ', ''), t.replace('@@ ', ''), d.replace('@@ ', '')
print(s, file=handles[0], flush=True)
print(t, file=handles[1], flush=True)
print(d, file=handles[2], flush=True)
if not args.no_tqdm:
progressbar.update(iters)
progressbar.set_description('finishing sentences={}/batches={}, \
length={}/average iter={}, speed={} sec/batch'.format(\
corpus_size, iters, src_len, np.mean(np.array(num_iters)), curr_time / (1 + iters)))
if evaluate:
for idx, each_dec in enumerate(all_decs):
if len(all_decs[idx]) != len(trg_outputs):
break
if args.dataset != "mscoco":
bleu_output = computeBLEU(each_dec, trg_outputs, corpus=True, tokenizer=tokenizer)
else:
bleu_output = computeBLEUMSCOCO(each_dec, trg_outputs, corpus=True, tokenizer=tokenizer)
args.logger.info("iter {} | {}".format(idx+1, print_bleu(bleu_output)))
        if args.adaptive_decoding is not None:
args.logger.info("----------------------------------------------")
args.logger.info("Average # iters {}".format(np.mean(num_iters_total)))
bleu_output = computeBLEU(dec_outputs, trg_outputs, corpus=True, tokenizer=tokenizer)
args.logger.info("Adaptive BLEU | {}".format(print_bleu(bleu_output)))
args.logger.info("----------------------------------------------")
args.logger.info("Decoding speed analysis :")
args.logger.info("{} sentences".format(cum_sentences))
if args.dataset != "mscoco":
args.logger.info("{} tokens".format(cum_tokens))
args.logger.info("{:.3f} seconds".format(curr_time))
args.logger.info("{:.3f} ms / sentence".format((curr_time / float(cum_sentences) * 1000)))
if args.dataset != "mscoco":
args.logger.info("{:.3f} ms / token".format((curr_time / float(cum_tokens) * 1000)))
args.logger.info("{:.3f} sentences / s".format(float(cum_sentences) / curr_time))
if args.dataset != "mscoco":
args.logger.info("{:.3f} tokens / s".format(float(cum_tokens) / curr_time))
args.logger.info("----------------------------------------------")
if args.decode_which > 0:
args.logger.info("Writing to special file")
parent = decoding_path / "speed" / "b_{}{}".format(args.beam_size if args.model is Transformer else args.valid_repeat_dec,
"" if args.model is Transformer else "_{}".format(args.adaptive_decoding != None))
args.logger.info(str(parent))
parent.mkdir(parents=True, exist_ok=True)
speed_handle = (parent / "results.{}".format(args.decode_which) ).open('w')
print("----------------------------------------------", file=speed_handle, flush=True)
print("Decoding speed analysis :", file=speed_handle, flush=True)
print("{} sentences".format(cum_sentences), file=speed_handle, flush=True)
if args.dataset != "mscoco":
print("{} tokens".format(cum_tokens), file=speed_handle, flush=True)
print("{:.3f} seconds".format(curr_time), file=speed_handle, flush=True)
print("{:.3f} ms / sentence".format((curr_time / float(cum_sentences) * 1000)), file=speed_handle, flush=True)
if args.dataset != "mscoco":
print("{:.3f} ms / token".format((curr_time / float(cum_tokens) * 1000)), file=speed_handle, flush=True)
print("{:.3f} sentences / s".format(float(cum_sentences) / curr_time), file=speed_handle, flush=True)
if args.dataset != "mscoco":
print("{:.3f} tokens / s".format(float(cum_tokens) / curr_time), file=speed_handle, flush=True)
print("----------------------------------------------", file=speed_handle, flush=True)
|
mlcomp/db/report_info/f1.py | sUeharaE4/mlcomp | 166 | 12637749 | import numpy as np
from sklearn.metrics import classification_report
from mlcomp.db.report_info.item import ReportLayoutItem
from mlcomp.utils.plot import figure_to_binary, plot_classification_report
class ReportLayoutF1(ReportLayoutItem):
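    """Report layout item that renders a classification report (per-class precision/recall/F1) as an image."""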
def plot(self, y: np.array, pred: np.array):
report = classification_report(y, pred)
fig = plot_classification_report(report)
return figure_to_binary(fig, dpi=70)
__all__ = ['ReportLayoutF1']
|