hexsha stringlengths 40..40 | size int64 3..1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3..972 | max_stars_repo_name stringlengths 6..130 | max_stars_repo_head_hexsha stringlengths 40..78 | max_stars_repo_licenses listlengths 1..10 | max_stars_count int64 1..191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24..24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24..24 ⌀ | max_issues_repo_path stringlengths 3..972 | max_issues_repo_name stringlengths 6..130 | max_issues_repo_head_hexsha stringlengths 40..78 | max_issues_repo_licenses listlengths 1..10 | max_issues_count int64 1..116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24..24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24..24 ⌀ | max_forks_repo_path stringlengths 3..972 | max_forks_repo_name stringlengths 6..130 | max_forks_repo_head_hexsha stringlengths 40..78 | max_forks_repo_licenses listlengths 1..10 | max_forks_count int64 1..105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24..24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24..24 ⌀ | content stringlengths 3..1.03M | avg_line_length float64 1.13..941k | max_line_length int64 2..941k | alphanum_fraction float64 0..1 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
dfca4efc48ecc61af82c78e3147819cd10885599 | 295 | py | Python | sortColors/sortColors00.py | tnkteja/notthisagain | 85e2b2cbea1298a052986e9dfe5e73d022b537f3 | ["MIT"] | null | null | null | sortColors/sortColors00.py | tnkteja/notthisagain | 85e2b2cbea1298a052986e9dfe5e73d022b537f3 | ["MIT"] | null | null | null | sortColors/sortColors00.py | tnkteja/notthisagain | 85e2b2cbea1298a052986e9dfe5e73d022b537f3 | ["MIT"] | null | null | null |
from random import randint

A = [randint(0, 2) for _ in range(10000)]
print(A)


def Solution(A):
    # Dutch national flag partition: move 0s to the front, 2s to the back,
    # and leave 1s in the middle, in a single pass.
    def swap(A, i, j):
        A[i], A[j] = A[j], A[i]

    i = 0
    k = len(A) - 1
    j = 0
    while j <= k:
        if A[j] == 0:
            swap(A, i, j)
            i += 1
            j += 1
        elif A[j] == 2:
            swap(A, j, k)
            k -= 1
            # do not advance j: the element swapped in from the back is unexamined
        else:
            j += 1
    return A


Solution(A)
print(A)
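# Added sanity check (illustrative; not part of the original snippet): after the
# in-place partition above, A should be fully sorted.
assert A == sorted(A)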
| 10.535714 | 40 | 0.545763 |
084342e7f9ed6a380b11f86ca7eaac8753fe6132 | 1,900 | py | Python | api/serializers/roleplay.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | ["MIT"] | 8 | 2019-08-27T20:08:22.000Z | 2021-07-23T22:49:47.000Z | api/serializers/roleplay.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | ["MIT"] | 73 | 2020-03-11T18:07:29.000Z | 2022-03-28T18:07:47.000Z | api/serializers/roleplay.py | oil-rope/oil-and-rope | 6d59c87d4809f120417a90c1624952085486bb06 | ["MIT"] | 4 | 2020-02-22T19:44:17.000Z | 2022-03-08T09:42:45.000Z |
from django.apps import apps
from rest_framework import serializers

from common.constants import models

from .chat import ChatSerializer
from .common import MappedSerializerMixin

Domain = apps.get_model(models.DOMAIN_MODEL)
Place = apps.get_model(models.PLACE_MODEL)
Race = apps.get_model(models.RACE_MODEL)
Session = apps.get_model(models.SESSION_MODEL)


class DomainSerializer(serializers.ModelSerializer):
    class Meta:
        model = Domain
        fields = (
            'id', 'name', 'description', 'domain_type', 'image', 'entry_created_at', 'entry_updated_at',
        )


class PlaceSerializer(serializers.ModelSerializer):
    class Meta:
        model = Place
        fields = (
            'id', 'name', 'description', 'site_type', 'image', 'parent_site', 'user', 'owner', 'entry_created_at',
            'entry_updated_at',
        )


class RaceSerializer(serializers.ModelSerializer):
    owners = serializers.SerializerMethodField(method_name='get_owners')

    def get_owners(self, obj):
        owners_pk = obj.owners.values_list('pk', flat=True)
        return list(owners_pk)

    class Meta:
        model = Race
        fields = (
            'id', 'name', 'description', 'strength', 'dexterity', 'constitution', 'intelligence', 'wisdom', 'charisma',
            'affected_by_armor', 'image', 'users', 'owners',
        )


class SessionSerializer(MappedSerializerMixin, serializers.ModelSerializer):
    serializers_map = {
        'chat': ChatSerializer(many=False, read_only=True)
    }
    game_masters = serializers.SerializerMethodField(method_name='get_game_masters')

    def get_game_masters(self, obj):
        gms_pk = obj.game_masters.values_list('pk', flat=True)
        return list(gms_pk)

    class Meta:
        model = Session
        fields = (
            'id', 'name', 'players', 'chat', 'next_game', 'system', 'world', 'game_masters',
        )
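# Illustrative usage (added note, not part of the original module). With standard
# Django REST Framework semantics, serializing a saved Domain instance would look
# roughly like this, where `some_domain` is a hypothetical Domain object:
#
#     DomainSerializer(instance=some_domain).data
#     # -> {'id': ..., 'name': ..., 'description': ..., 'domain_type': ..., ...}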
| 28.787879 | 119 | 0.661579 |
5d22ae78c20cfee4c9fff2a379aff924280485b3 | 143,912 | py | Python | fbgemm_gpu/test/split_table_batched_embeddings_test.py | wfanzju/FBGEMM | a58ab65e27da27949ca0125683a6f83e1e464064 | ["BSD-3-Clause"] | null | null | null | fbgemm_gpu/test/split_table_batched_embeddings_test.py | wfanzju/FBGEMM | a58ab65e27da27949ca0125683a6f83e1e464064 | ["BSD-3-Clause"] | null | null | null | fbgemm_gpu/test/split_table_batched_embeddings_test.py | wfanzju/FBGEMM | a58ab65e27da27949ca0125683a6f83e1e464064 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/env python3
# pyre-ignore-all-errors[56]
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import pickle
import random
import unittest
from typing import Callable, List, Optional, Tuple, TypeVar
import fbgemm_gpu
import fbgemm_gpu.split_table_batched_embeddings_ops as split_table_batched_embeddings_ops
import hypothesis.strategies as st
import numpy as np
import torch
from fbgemm_gpu.split_table_batched_embeddings_ops import (
OptimType,
SparseType,
RecordCacheMetrics,
BoundsCheckMode,
)
open_source: bool = getattr(fbgemm_gpu, "open_source", False)
if open_source:
# pyre-ignore[21]
from test_utils import gpu_available, gpu_unavailable
else:
from fbgemm_gpu.test.test_utils import gpu_available, gpu_unavailable
from hypothesis import HealthCheck, Verbosity, assume, given, settings
from hypothesis.strategies import composite
from torch import Tensor
MAX_EXAMPLES = 40
# For long-running tests, reduce the number of iterations to avoid timeout errors.
MAX_EXAMPLES_LONG_RUNNING = 15
Deviceable = TypeVar("Deviceable", torch.nn.EmbeddingBag, Tensor)
@composite
# pyre-ignore
def get_nbit_weights_ty(draw) -> Optional[SparseType]:
"""
Returns None if a mixed weights type should be used; otherwise returns a specific SparseType.
"""
mixed_weights_ty = draw(st.booleans())
if mixed_weights_ty:
return None
return draw(
st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
)
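# Added note: the strategy above either returns None, which the tests interpret as
# "use mixed weight types per table", or draws a single SparseType shared by all tables.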
def round_up(a: int, b: int) -> int:
return int((a + b - 1) // b) * b
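# Added note: round_up(a, b) rounds `a` up to the nearest multiple of `b`,
# e.g. round_up(10, 4) == 12 and round_up(12, 4) == 12.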
def get_offsets_from_dense(indices: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
(B, L) = indices.size()
return (
indices.contiguous().view(-1),
torch.tensor(
np.cumsum(np.asarray([0] + [L for _ in range(B)])[:-1]).astype(np.int64)
),
)
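# Added note: for a dense (B, L) index tensor this returns the flattened indices plus
# per-bag start offsets [0, L, 2*L, ..., (B-1)*L], i.e. one offset per bag, matching
# torch.nn.EmbeddingBag's (input, offsets) calling convention.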
def to_device(t: Deviceable, use_cpu: bool) -> Deviceable:
# pyre-fixme[7]: Expected `Deviceable` but got `Union[Tensor,
# torch.nn.EmbeddingBag]`.
return t.cpu() if use_cpu else t.cuda()
def b_indices(
b: Callable[..., torch.Tensor],
x: torch.Tensor,
per_sample_weights: Optional[torch.Tensor] = None,
use_cpu: bool = False,
do_pooling: bool = True,
) -> torch.Tensor:
(indices, offsets) = get_offsets_from_dense(x)
if do_pooling:
return b(
to_device(indices, use_cpu),
to_device(offsets, use_cpu),
per_sample_weights=per_sample_weights,
)
else:
return b(to_device(indices, use_cpu))
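# Added note: b_indices runs the reference torch.nn module `b` on a dense (B, L) batch
# by converting it to (indices, offsets) form; when do_pooling is False, `b` is a plain
# nn.Embedding and only the flattened indices are passed.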
def get_table_batched_offsets_from_dense(
merged_indices: torch.Tensor, use_cpu: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
(T, B, L) = merged_indices.size()
lengths = np.ones((T, B)) * L
flat_lengths = lengths.flatten()
return (
to_device(merged_indices.contiguous().view(-1), use_cpu),
to_device(
torch.tensor(([0] + np.cumsum(flat_lengths).tolist())).long(),
use_cpu,
),
)
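# Added note: unlike get_offsets_from_dense above, this returns offsets as a complete
# cumulative sum of length T*B + 1 ([0, L, 2*L, ..., T*B*L]), which is the form the
# table-batched embedding ops exercised in this test consume.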
def generate_requests(
iters: int,
B: int,
T: int,
L: int,
E: int,
# inter-batch indices reuse rate
reuse: float = 0.0,
# alpha <= 1.0: use uniform distribution
# alpha > 1.0: use zipf distribution
alpha: float = 1.0,
weights_precision: SparseType = SparseType.FP32,
weighted: bool = False,
) -> List[Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]]:
if alpha <= 1.0:
all_indices = torch.randint(
low=0,
high=E,
size=(iters, T, B * L),
device=torch.cuda.current_device(),
dtype=torch.int32,
)
else:
all_indices = (
torch.as_tensor(np.random.zipf(a=alpha, size=(iters, T, B * L)))
.to(torch.cuda.current_device())
.int()
% E
)
for it in range(iters - 1):
for t in range(T):
reused_indices = torch.randperm(B * L, device=torch.cuda.current_device())[
: int(B * L * reuse)
]
all_indices[it + 1, t, reused_indices] = all_indices[it, t, reused_indices]
rs = []
for it in range(iters):
weight_tensor = (
None
if not weighted
else torch.randn(
T * B * L,
device=torch.cuda.current_device(),
dtype=torch.float16 if weights_precision else torch.float32,
)
)
rs.append(
get_table_batched_offsets_from_dense(all_indices[it].view(T, B, L))
+ (weight_tensor,)
)
return rs
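# Added note: generate_requests yields `iters` tuples of (indices, offsets, weights),
# covering T tables with B bags of L lookups each; indices are drawn uniformly when
# alpha <= 1.0 and from a Zipf distribution otherwise, and weights is None unless
# weighted=True.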
def quantize_embs(
weight: Tensor, weight_ty: SparseType
) -> Tuple[Tensor, Optional[Tensor]]:
if weight_ty == SparseType.FP32:
q_weight = weight.float()
# FIXME: How to view the PyTorch Tensor as a different type (e.g., uint8)
# Here it uses numpy and it will introduce DtoH/HtoD overhead.
res_weight = torch.tensor(q_weight.cpu().numpy().view(np.uint8)).contiguous()
return (res_weight, None)
elif weight_ty == SparseType.FP16:
q_weight = weight.half()
res_weight = torch.tensor(q_weight.cpu().numpy().view(np.uint8)).contiguous()
return (res_weight, None)
elif weight_ty == SparseType.INT8:
q_weight = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(weight)
res_weight = torch.tensor(q_weight[:, :-8].cpu().numpy().view(np.uint8))
res_scale_shift = torch.tensor(
q_weight[:, -8:]
.contiguous()
.cpu()
.numpy()
.view(np.float32)
.astype(np.float16)
.view(np.uint8)
) # [-4, -2]: scale; [-2:]: bias
return (res_weight, res_scale_shift)
elif weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2:
q_weight = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
weight,
bit_rate=weight_ty.bit_rate(),
)
res_weight = torch.tensor(q_weight[:, :-4].cpu().numpy().view(np.uint8))
res_scale_shift = torch.tensor(
q_weight[:, -4:].contiguous().cpu().numpy().view(np.uint8)
) # [-4, -2]: scale; [-2:]: bias
return (res_weight, res_scale_shift)
else:
raise RuntimeError("Unsupported SparseType: {}".format(weight_ty))
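# Added note: the fused low-precision formats append a per-row scale/bias trailer
# (two fp32 values for the 8-bit format, two fp16 values for the 4- and 2-bit formats);
# quantize_embs splits that trailer out as res_scale_shift, converting the fp32 pair to
# fp16 so every format stores the trailer in the same 4-byte layout.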
class SplitTableBatchedEmbeddingsTest(unittest.TestCase):
def execute_forward_(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
# NOTE: CPU does not support FP16.
assume(not (use_cpu and weights_precision == SparseType.FP16))
# NOTE: weighted operation can be done only for SUM.
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
# NOTE: no-bag (PoolingMode.NONE) ops only work on GPUs and do not support mixed dims
assume(
not use_cpu
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not mixed
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
emb_op = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [int(1e4)] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [split_table_batched_embeddings_ops.EmbeddingLocation.HOST] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.INT8:
for t in range(T):
bs[t].weight.data.copy_(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
bs[t].weight.data
)
)
)
if weights_precision == SparseType.FP16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
xs = [to_device(torch.randint(low=0, high=e, size=(B, L)), use_cpu) for e in Es]
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
if do_pooling:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
cc = emb_op(
embedding_specs=[
(
E,
D,
split_table_batched_embeddings_ops.EmbeddingLocation(M),
compute_device,
)
for (E, D, M) in zip(Es, Ds, managed)
],
weights_precision=weights_precision,
optimizer=OptimType.EXACT_SGD,
learning_rate=0.05,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
)
# NOTE: test TorchScript-compatible!
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(
bs[t].weight
if weights_precision != SparseType.INT8
else torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(bs[t].weight)
)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=8.0e-3 if weights_precision == SparseType.FP16 else 1.0e-5,
rtol=8.0e-3 if weights_precision == SparseType.FP16 else 1.0e-5,
)
def test_forward_cpu_int8(
self,
) -> None:
weights_precision = SparseType.INT8
use_cpu = True
T = random.randint(1, 10)
D = random.randint(2, min(256, int(2048 / T)))
B = random.randint(1, min(128, int(2048 / T / D)))
L = random.randint(0, min(20, int(2048 / T / D / B)))
log_E = random.randint(3, 5)
use_cache = False
# cache_algorithm is a don't-care since the cache is not used.
cache_algorithm = split_table_batched_embeddings_ops.CacheAlgorithm.LRU
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
]
)
mixed = False
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
)
def test_forward_cpu_fp32(
self,
) -> None:
weights_precision = SparseType.FP32
use_cpu = True
T = random.randint(1, 10)
D = random.randint(2, min(256, int(2048 / T)))
B = random.randint(1, min(128, int(2048 / T / D)))
L = random.randint(0, min(20, int(2048 / T / D / B)))
log_E = random.randint(3, 5)
use_cache = False
# cache_algorithm is a don't-care since the cache is not used.
cache_algorithm = split_table_batched_embeddings_ops.CacheAlgorithm.LRU
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
]
)
mixed = False
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
)
@unittest.skipIf(*gpu_unavailable)
def test_forward_gpu_no_cache_int8(
self,
) -> None:
weights_precision = SparseType.INT8
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
# cache_algorithm is a don't-care since the cache is not used.
cache_algorithm = split_table_batched_embeddings_ops.CacheAlgorithm.LRU
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
)
@unittest.skipIf(*gpu_unavailable)
def test_forward_gpu_no_cache_fp16(
self,
) -> None:
weights_precision = SparseType.FP16
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
# cache_algorithm is a don't-care since the cache is not used.
cache_algorithm = split_table_batched_embeddings_ops.CacheAlgorithm.LRU
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
)
@unittest.skipIf(*gpu_unavailable)
def test_forward_gpu_no_cache_fp32(
self,
) -> None:
weights_precision = SparseType.FP32
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = False
# cache_algorithm is a don't-care since the cache is not used.
cache_algorithm = split_table_batched_embeddings_ops.CacheAlgorithm.LRU
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_int8(
self,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
) -> None:
weights_precision = SparseType.INT8
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_fp16(
self,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
) -> None:
weights_precision = SparseType.FP16
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
)
@unittest.skipIf(*gpu_unavailable)
@given(
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_forward_gpu_uvm_cache_fp32(
self,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
) -> None:
weights_precision = SparseType.FP32
use_cpu = False
T = random.randint(1, 10)
D = random.randint(2, 256)
B = random.randint(1, 128)
L = random.randint(0, 20)
log_E = random.randint(3, 5)
use_cache = True
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
self.execute_forward_(
T,
D,
B,
log_E,
L,
weights_precision,
weighted,
mixed,
use_cache,
cache_algorithm,
pooling_mode,
use_cpu,
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
# FIXME: switch to
# output_dtype=st.sampled_from([SparseType.FP16, SparseType.INT8]),
# after v0/v2 is landed.
output_dtype=st.sampled_from([SparseType.FP32]),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much],
)
def test_forward_fused_pooled_emb_quant(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
output_dtype: SparseType,
) -> None:
Ds = [
round_up(np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)), 4)
for _ in range(T)
]
E = int(10 ** log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
op = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
output_dtype=output_dtype,
device=torch.cuda.current_device(),
)
op_ref = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E,
D,
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
output_dtype=SparseType.FP32,
device=torch.cuda.current_device(),
)
)
# sync weights between two ops
split_weights = op.split_embedding_weights()
ref_split_weights = op_ref.split_embedding_weights()
for t in range(T):
split_weights[t].data.copy_(ref_split_weights[t])
requests = generate_requests(2, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
lowp_pooled_output = op(
indices=indices,
offsets=offsets,
)
fp32_pooled_output = op_ref(
indices=indices,
offsets=offsets,
)
lowp_pooled_emb_split = [
d + 8 if output_dtype == SparseType.INT8 else d for d in op.dims
]
lowp_pooled_output_per_table = torch.split(
lowp_pooled_output, lowp_pooled_emb_split, dim=1
)
deq_lowp_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(t.contiguous())
if output_dtype == SparseType.INT8
else t.float()
for t in lowp_pooled_output_per_table
]
fp32_pooled_output_per_table = torch.split(
fp32_pooled_output, op.dims, dim=1
)
dq_fp32_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
t.contiguous()
).contiguous()
)
if output_dtype == SparseType.INT8
else t.half().float()
for t in fp32_pooled_output_per_table
]
cat_deq_lowp_pooled_output = torch.cat(
deq_lowp_pooled_output_per_table, dim=1
)
cat_dq_fp32_pooled_output = torch.cat(
dq_fp32_pooled_output_per_table, dim=1
)
torch.testing.assert_close(
cat_deq_lowp_pooled_output, cat_dq_fp32_pooled_output
)
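# Added note: the test above compares the fused low-precision output path against an
# FP32 reference by dequantizing both sides to float before the comparison; for INT8
# output each pooled table slice carries 8 extra bytes of scale/bias, hence the d + 8
# split sizes.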
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=10),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
# FIXME: INT2 caused big numerical error for this test
# SparseType.INT2,
]
),
output_dtype=st.sampled_from(
[
SparseType.FP16,
SparseType.INT8,
# SparseType.INT4,
]
),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much],
)
def test_nbit_forward_fused_pooled_emb_quant(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_ty: SparseType,
output_dtype: SparseType,
) -> None:
D_alignment = max(weights_ty.align_size() for t in range(T))
D_alignment = max(D_alignment, output_dtype.align_size())
D = round_up(D, D_alignment)
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Ds = [D] * T
E = int(10 ** log_E)
Es = [np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)]
weights_ty_list = [weights_ty] * T
managed = [split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE] * T
op = split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
split_table_batched_embeddings_ops.EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
output_dtype=output_dtype,
device=torch.cuda.current_device(),
)
# Initialize the random weights for int nbit table split embedding bag
op.fill_random_weights()
op_ref = (
split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
split_table_batched_embeddings_ops.EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
output_dtype=SparseType.FP32,
device=torch.cuda.current_device(),
)
)
# Initialize the random weights for int nbit table split embedding bag
op_ref.fill_random_weights()
# sync weights between two ops
split_weights = op.split_embedding_weights()
ref_split_weights = op_ref.split_embedding_weights()
for t in range(T):
(weights, scale_shift) = split_weights[t]
(ref_weights, ref_scale_shift) = ref_split_weights[t]
self.assertEqual(weights.size(), ref_weights.size())
element_size = weights_ty_list[t].bit_rate() / 8.0
rand_tensor = torch.rand(
ref_weights.shape[0], int(ref_weights.shape[1] / element_size)
)
rand_weights, rand_scale_shift = quantize_embs(
rand_tensor, weights_ty_list[t]
)
ref_weights.copy_(rand_weights)
weights.copy_(ref_weights)
if rand_scale_shift is not None:
self.assertIsNotNone(scale_shift)
self.assertIsNotNone(ref_scale_shift)
ref_scale_shift.copy_(rand_scale_shift)
scale_shift.copy_(ref_scale_shift)
requests = generate_requests(1, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
lowp_pooled_output = op(
indices=indices.int(),
offsets=offsets.int(),
)
fp32_pooled_output = op_ref(
indices=indices.int(),
offsets=offsets.int(),
)
lowp_pooled_emb_split = [
d + 8 if output_dtype == SparseType.INT8 else d for d in Ds
]
lowp_pooled_output_per_table = torch.split(
lowp_pooled_output, lowp_pooled_emb_split, dim=1
)
deq_lowp_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(t.contiguous())
if output_dtype == SparseType.INT8
else t.float()
for t in lowp_pooled_output_per_table
]
fp32_pooled_output_per_table = torch.split(fp32_pooled_output, Ds, dim=1)
dq_fp32_pooled_output_per_table = [
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(
torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
t.contiguous()
).contiguous()
).contiguous()
if output_dtype == SparseType.INT8
else t.half().float()
for t in fp32_pooled_output_per_table
]
cat_deq_lowp_pooled_output = torch.cat(
deq_lowp_pooled_output_per_table, dim=1
)
cat_dq_fp32_pooled_output = torch.cat(
dq_fp32_pooled_output_per_table, dim=1
)
torch.testing.assert_close(
cat_deq_lowp_pooled_output,
cat_dq_fp32_pooled_output,
rtol=1e-2,
atol=1e-2,
equal_nan=True,
)
@given(
T=st.integers(min_value=1, max_value=3),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=32),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=10),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
mixed=st.booleans(),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=10,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_dense(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
long_segments: bool,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
) -> None:
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
assume(not (use_cpu and weights_precision == SparseType.FP16))
# No-bag (PoolingMode.NONE) ops only work on GPUs and do not support mixed dims or weighted lookups
assume(
not use_cpu
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not mixed
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not weighted
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
emb_op = (
split_table_batched_embeddings_ops.DenseTableBatchedEmbeddingBagsCodegen
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2 * E)) for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=False), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=False), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(e), size=(B, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for e in Es
]
if long_segments and L > 0 and weights_precision != SparseType.FP16:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
grad_weights = torch.cat([b.weight.grad.view(-1) for b in bs])
if weights_precision == SparseType.FP16 and not use_cpu:
grad_weights = grad_weights.half()
cc = emb_op(
embedding_specs=[(E, D) for (E, D) in zip(Es, Ds)],
pooling_mode=pooling_mode,
use_cpu=use_cpu,
)
if weights_precision == SparseType.FP16 and not use_cpu:
cc = cc.half()
if do_pooling:
# NOTE: test TorchScript-compatible!
cc = torch.jit.script(cc)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
if do_pooling:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
torch.testing.assert_close(
fc2.float(),
f.float(),
atol=5.0e-3 if weights_precision == SparseType.FP16 else 1.0e-5,
rtol=5.0e-3 if weights_precision == SparseType.FP16 else 1.0e-5,
)
if do_pooling:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1).contiguous()
else:
goc = torch.cat(gos, dim=0).contiguous()
fc2.backward(goc)
torch.testing.assert_close(
cc.weights.grad,
grad_weights,
atol=5.0e-3 if weights_precision == SparseType.FP16 else 1.0e-4,
rtol=5.0e-3 if weights_precision == SparseType.FP16 else 1.0e-4,
)
cc = split_table_batched_embeddings_ops.DenseTableBatchedEmbeddingBagsCodegen(
[(E, D) for (E, D) in zip(Es, Ds)],
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=split_table_batched_embeddings_ops.PoolingMode.SUM,
use_cpu=use_cpu,
).double()
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu).double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(cc, (indices, offsets, per_sample_weights))
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weights_precision=st.sampled_from([SparseType.FP16, SparseType.FP32]),
weighted=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
exact=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_sgd( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weights_precision: SparseType,
weighted: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
long_segments: bool,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
exact: bool,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(not (use_cpu and weights_precision == SparseType.FP16))
# GPU only does exact sgd
assume((use_cpu and not long_segments) or exact)
# No-bag (PoolingMode.NONE) ops only work on GPUs and do not support mixed dims or weighted lookups
assume(
not use_cpu
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not mixed
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not weighted
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
emb_op = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [split_table_batched_embeddings_ops.EmbeddingLocation.HOST] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
if exact:
table_to_replicate = T // 2
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(Es[t]), size=(B, L), replace=exact).astype(
np.int64
)
),
use_cpu,
)
for t in feature_table_map
]
if long_segments and L > 0:
for x in xs:
x[:, 0] = 0
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(len(xs))]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# do SGD update
lr = 0.05
if exact:
# pyre-fixme[61]: `table_to_replicate` may not be initialized here.
del bs[table_to_replicate]
new_weights = [(b.weight - b.weight.grad * lr) for b in bs]
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
optimizer=OptimType.EXACT_SGD if exact else OptimType.SGD,
feature_table_map=feature_table_map,
learning_rate=lr,
weights_precision=weights_precision,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
if do_pooling:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1).contiguous()
else:
goc = torch.cat(gos, dim=0).contiguous()
fc2.backward(goc)
if use_cache:
cc.flush()
for t in range(T):
torch.testing.assert_close(
cc.split_embedding_weights()[t],
# pyre-fixme[16]: `float` has no attribute `half`.
new_weights[t].half()
if weights_precision == SparseType.FP16 and not use_cpu
else new_weights[t],
atol=(1.0e-2 if long_segments else 5.0e-3)
if weights_precision == SparseType.FP16
else 1.0e-5,
rtol=2.0e-2 if weights_precision == SparseType.FP16 else 1.0e-5,
)
def execute_backward_adagrad_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
exact: bool,
) -> None:
# NOTE: cache is not applicable to CPU version.
assume(not use_cpu or not use_cache)
# Approx AdaGrad only works with row_wise on CPU
assume((use_cpu and row_wise) or exact)
# NOTE: torch.autograd.gradcheck() is too time-consuming for CPU version
# so we have to limit (T * B * L * D)!
assume(not use_cpu or T * B * L * D <= 1024)
assume(not (use_cpu and weights_precision == SparseType.FP16))
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
) # No bag ops only work on GPUs, no mixed, no weighted
assume(
not use_cpu
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not mixed
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not weighted
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
emb_op = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
# stochastic rounding only implemented for rowwise
assume(not stochastic_rounding or row_wise)
# need unique indices for non-exact tests
assume(exact or int(10 ** log_E) > int(2.1 * B * L))
# only row-wise supports caching
assume(row_wise or not use_cache)
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [split_table_batched_embeddings_ops.EmbeddingLocation.HOST] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if weights_precision == SparseType.FP16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
bs = [b.half() for b in bs]
feature_table_map = list(range(T))
if exact:
# autograd with shared embedding only works for exact
table_to_replicate = T // 2
bs.insert(table_to_replicate, bs[table_to_replicate])
feature_table_map.insert(table_to_replicate, table_to_replicate)
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(Es[t]), size=(B, L), replace=exact).astype(
np.int64
)
),
use_cpu,
)
for t in feature_table_map
]
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(len(xs))]
xws_acc_type = copy.deepcopy(xws)
if weights_precision == SparseType.FP16 and not use_cpu:
# NOTE: CPU version of torch.nn.EmbeddingBag doesn't support fp16.
xws = [xw.half() for xw in xws]
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
# set the AdaGrad hyperparameters used by both the op and the reference check
lr = 0.5
eps = 0.2
optimizer = (
(OptimType.EXACT_ROWWISE_ADAGRAD if exact else OptimType.ROWWISE_ADAGRAD)
if row_wise
else OptimType.EXACT_ADAGRAD
)
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
feature_table_map=feature_table_map,
optimizer=optimizer,
learning_rate=lr,
eps=eps,
weights_precision=weights_precision,
stochastic_rounding=stochastic_rounding,
pooling_mode=pooling_mode,
)
if exact:
# pyre-fixme[61]: `table_to_replicate` may not be initialized here.
del bs[table_to_replicate]
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
if do_pooling:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0).contiguous()
fc2.backward(goc)
cc.flush()
split_optimizer_states = [s for (s,) in cc.split_optimizer_states()]
for t in range(T):
ref_optimizer_state = bs[t].weight.grad.float().cpu().to_dense().pow(2)
torch.testing.assert_close(
split_optimizer_states[t].float().cpu(),
ref_optimizer_state.mean(dim=1) if row_wise else ref_optimizer_state,
atol=1.0e-2 if weights_precision == SparseType.FP16 else 1.0e-4,
rtol=1.0e-2 if weights_precision == SparseType.FP16 else 1.0e-4,
)
for t in range(T):
# optimizer_state = squares (no row-wise) or sum squares (row-wise)
torch.testing.assert_close(
cc.split_embedding_weights()[t].float().cpu(),
torch.addcdiv(
bs[t].weight.float().cpu(),
value=-lr,
tensor1=bs[t].weight.grad.float().cpu().to_dense(),
tensor2=split_optimizer_states[t]
.float()
.sqrt_()
.add_(eps)
.view(Es[t], 1 if row_wise else Ds[t])
.cpu(),
),
atol=1.0e-2 if weights_precision == SparseType.FP16 else 1.0e-4,
rtol=1.0e-2 if weights_precision == SparseType.FP16 else 1.0e-4,
)
if use_cpu:
D_gradcheck = (D_gradcheck + 15) // 16 * 4
else:
D_gradcheck = D_gradcheck * 4
cc = emb_op(
embedding_specs=[
(E, D_gradcheck, M, compute_device) for (E, M) in zip(Es, managed)
],
feature_table_map=feature_table_map,
optimizer=optimizer,
learning_rate=0.0,
eps=eps,
weights_precision=weights_precision,
stochastic_rounding=stochastic_rounding,
# NOTE: only SUM pooling can work with per_sample_weights!
pooling_mode=split_table_batched_embeddings_ops.PoolingMode.SUM,
)
if use_cpu:
# NOTE: GPU version of SplitTableBatchedEmbeddingBagsCodegen doesn't support double.
cc = cc.double()
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
torch.autograd.gradcheck(cc, (indices, offsets, per_sample_weights))
per_sample_weights = to_device(xw.contiguous().view(-1), use_cpu)
if use_cpu:
per_sample_weights = per_sample_weights.double()
per_sample_weights.requires_grad = True
indices.requires_grad = False
offsets.requires_grad = False
for param in cc.parameters():
param.requires_grad = False
y = cc(indices, offsets, per_sample_weights)
y.sum().backward()
indice_weight_grad_all = per_sample_weights.grad.clone().cpu()
T_ = len(xws)
feature_requires_grad = to_device(
torch.tensor(np.random.choice([0, 1], replace=True, size=(T_,))).int(),
use_cpu,
)
per_sample_weights = per_sample_weights.detach().clone()
per_sample_weights.requires_grad = True
y = cc(
indices,
offsets,
per_sample_weights,
feature_requires_grad=feature_requires_grad,
)
y.sum().backward()
indice_weight_grad_mask = per_sample_weights.grad.clone().cpu()
for t in range(T_):
if feature_requires_grad[t]:
torch.testing.assert_close(
indice_weight_grad_mask.view(T_, B, L)[t],
indice_weight_grad_all.view(T_, B, L)[t],
)
else:
torch.testing.assert_close(
indice_weight_grad_mask.view(T_, B, L)[t],
torch.zeros_like(indice_weight_grad_mask.view(T_, B, L)[t]),
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
use_cpu=st.booleans() if gpu_available else st.just(True),
exact=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmSUM( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
use_cpu: bool,
exact: bool,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
use_cache,
cache_algorithm,
split_table_batched_embeddings_ops.PoolingMode.SUM,
use_cpu,
exact,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
use_cpu=st.booleans() if gpu_available else st.just(True),
exact=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmMEAN( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
use_cpu: bool,
exact: bool,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
use_cache,
cache_algorithm,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
use_cpu,
exact,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP16),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
use_cpu=st.booleans() if gpu_available else st.just(True),
exact=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp16_pmNONE( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
use_cpu: bool,
exact: bool,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
use_cache,
cache_algorithm,
split_table_batched_embeddings_ops.PoolingMode.NONE,
use_cpu,
exact,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
use_cpu=st.booleans() if gpu_available else st.just(True),
exact=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmSUM( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
use_cpu: bool,
exact: bool,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
use_cache,
cache_algorithm,
split_table_batched_embeddings_ops.PoolingMode.SUM,
use_cpu,
exact,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
use_cpu=st.booleans() if gpu_available else st.just(True),
exact=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmMEAN( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
use_cpu: bool,
exact: bool,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
use_cache,
cache_algorithm,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
use_cpu,
exact,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=128),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
D_gradcheck=st.integers(min_value=1, max_value=2),
weights_precision=st.just(SparseType.FP32),
stochastic_rounding=st.booleans(),
weighted=st.booleans(),
row_wise=st.booleans(),
mixed=st.booleans(),
use_cache=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
use_cpu=st.booleans() if gpu_available else st.just(True),
exact=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
def test_backward_adagrad_fp32_pmNONE( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
D_gradcheck: int,
weights_precision: SparseType,
stochastic_rounding: bool,
weighted: bool,
row_wise: bool,
mixed: bool,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
use_cpu: bool,
exact: bool,
) -> None:
self.execute_backward_adagrad_(
T,
D,
B,
log_E,
L,
D_gradcheck,
weights_precision,
stochastic_rounding,
weighted,
row_wise,
mixed,
use_cache,
cache_algorithm,
split_table_batched_embeddings_ops.PoolingMode.NONE,
use_cpu,
exact,
)
@unittest.skipIf(*gpu_unavailable)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=1, max_value=20),
mixed=st.booleans(),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_pipeline(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
mixed: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
) -> None:
iters = 3
E = int(10 ** log_E)
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
cc_ref = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[
(
E,
D,
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
)
)
cc = split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen(
[
(E, D, M, split_table_batched_embeddings_ops.ComputeDevice.CUDA)
for (E, D, M) in zip(Es, Ds, managed)
],
cache_algorithm=cache_algorithm,
)
for t in range(T):
self.assertEqual(
cc.split_embedding_weights()[t].size(),
cc_ref.split_embedding_weights()[t].size(),
)
cc.split_embedding_weights()[t].data.copy_(
cc_ref.split_embedding_weights()[t]
)
requests = generate_requests(iters, B, T, L, min(Es), reuse=0.1)
grad_output = torch.randn(B, sum(Ds)).cuda()
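        # Run forward/backward on both the cached table and the all-on-device
        # reference; outputs must match per step and, after flush(), the
        # embedding weights must match as well.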
for indices, offsets, _ in requests:
output = cc(indices, offsets)
output_ref = cc_ref(indices, offsets)
torch.testing.assert_close(output, output_ref)
output.backward(grad_output)
output_ref.backward(grad_output)
cc.flush()
for t in range(T):
torch.testing.assert_close(
cc.split_embedding_weights()[t], cc_ref.split_embedding_weights()[t]
)
def execute_backward_optimizers_( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
) -> None:
# NOTE: limit (T * B * L * D) to avoid timeout for CPU version!
assume(not use_cpu or T * B * L * D <= 2048)
assume(
not use_cpu
or optimizer
in [
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_SGD,
OptimType.SGD,
]
)
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
        # No-bag (PoolingMode.NONE) ops only work on GPUs and do not support
        # mixed table dimensions or per-sample weights.
assume(
not use_cpu
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not mixed
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not weighted
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
emb_op = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
mode = "sum"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.MEAN:
mode = "mean"
do_pooling = True
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mode = "sum"
do_pooling = False
else:
# This proves that we have exhaustively checked all PoolingModes
raise RuntimeError("Unknown PoolingMode!")
E = int(10 ** log_E)
if use_cpu:
D = (D + 15) // 16 * 4
else:
D = D * 4
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(np.random.randint(low=int(0.25 * D), high=int(1.0 * D)), 4)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
if use_cpu:
managed = [split_table_batched_embeddings_ops.EmbeddingLocation.HOST] * T
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
xs = [
to_device(
torch.from_numpy(
np.random.choice(range(e), size=(B, L), replace=True).astype(
np.int64
)
),
use_cpu,
)
for e in Es
]
if long_segments and L > 0:
for x, e in zip(xs, Es):
x[:, 0] = np.random.randint(low=0, high=e)
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
gos = [torch.randn_like(f) for f in fs]
[f.backward(go) for (f, go) in zip(fs, gos)]
        # Set up hyperparameters for the reference optimizer updates below.
optimizer_kwargs = {"learning_rate": 0.5}
(lr, eps, beta1, beta2, weight_decay, momentum, eta) = (
0.5,
1e-4,
0.9,
0.99,
0.01,
0.9,
0.01,
)
if optimizer in (OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.EXACT_ADAGRAD):
optimizer_kwargs["eps"] = eps
if optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD:
optimizer_kwargs["eps"] = eps
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.ADAM):
optimizer_kwargs["eps"] = eps
optimizer_kwargs["beta1"] = beta1
optimizer_kwargs["beta2"] = beta2
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer in (OptimType.PARTIAL_ROWWISE_LAMB, OptimType.LAMB):
optimizer_kwargs["eps"] = eps
optimizer_kwargs["beta1"] = beta1
optimizer_kwargs["beta2"] = beta2
optimizer_kwargs["weight_decay"] = weight_decay
if optimizer == OptimType.LARS_SGD:
optimizer_kwargs["weight_decay"] = weight_decay
optimizer_kwargs["momentum"] = momentum
optimizer_kwargs["eta"] = eta
cc = emb_op(
embedding_specs=[
(E, D, M, compute_device) for (E, D, M) in zip(Es, Ds, managed)
],
optimizer=optimizer,
pooling_mode=pooling_mode,
# pyre-fixme[6]: Expected `CacheAlgorithm` for 5th param but got `float`.
**optimizer_kwargs,
)
for t in range(T):
cc.split_embedding_weights()[t].data.copy_(bs[t].weight)
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
fc2 = (
cc(indices, offsets)
if not weighted
else cc(indices, offsets, to_device(xw.contiguous().view(-1), use_cpu))
)
if do_pooling:
goc = torch.cat([go.view(B, -1) for go in gos], dim=1)
else:
goc = torch.cat(gos, dim=0).contiguous()
fc2.backward(goc)
cc.flush()
split_optimizer_states = cc.split_optimizer_states()
self.assertEqual(len(split_optimizer_states), T)
split_weights = cc.split_embedding_weights()
if optimizer in (OptimType.EXACT_ROWWISE_ADAGRAD, OptimType.EXACT_ADAGRAD):
rowwise = optimizer == OptimType.EXACT_ROWWISE_ADAGRAD
for t in range(T):
(m1,) = split_optimizer_states[t]
                # to_dense on the GPU is non-deterministic due to atomics used
                # in coalescing and floating-point non-associativity.
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
m1_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
)
torch.testing.assert_close(
m1.float().cpu(), m1_ref.float(), atol=1.0e-4, rtol=1.0e-4
)
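                # Reference Adagrad update: w -= lr * g / (sqrt(state) + eps),
                # using a single per-row state for the row-wise variant.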
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - lr * dense_cpu_grad / (
torch.sqrt(
m1_ref if not rowwise else m1_ref.view(m1_ref.numel(), 1)
)
+ eps
)
# TODO: why is tolerance off here?
torch.testing.assert_close(
weights_new.float().cpu(),
weights_ref.float(),
atol=1.0e-2,
rtol=1.0e-2,
)
if optimizer == OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD:
for t in range(T):
(m1,) = split_optimizer_states[t]
                # to_dense on the GPU is non-deterministic due to atomics used
                # in coalescing and floating-point non-associativity.
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
dense_cpu_grad += weight_decay * bs[t].weight.cpu()
iter_ = cc.iter.item()
lambda_ = (iter_ + 1) ** 0.5
m1_ref = dense_cpu_grad.pow(2).mean(dim=1)
m1_ref *= lambda_
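                # Weighted row-wise Adagrad scales the row-wise squared-gradient
                # state by sqrt(iter + 1) and uses a cube-root denominator in
                # the reference update below.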
torch.testing.assert_close(
m1.float().index_select(dim=0, index=x[t].view(-1)).cpu(),
m1_ref.float().index_select(dim=0, index=x[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - lr * lambda_ * dense_cpu_grad / (
torch.pow(m1_ref.view(m1_ref.numel(), 1), 1.0 / 3) + eps
)
torch.testing.assert_close(
weights_new.index_select(dim=0, index=x[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=x[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
if optimizer in (OptimType.PARTIAL_ROWWISE_ADAM, OptimType.ADAM):
rowwise = optimizer == OptimType.PARTIAL_ROWWISE_ADAM
for t in range(T):
(m1, m2) = split_optimizer_states[t]
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
m2_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
) * (1.0 - beta2)
torch.testing.assert_close(m2.cpu(), m2_ref, atol=1.0e-4, rtol=1.0e-4)
m1_ref = dense_cpu_grad * (1.0 - beta1)
torch.testing.assert_close(m1.cpu(), m1_ref, atol=1.0e-4, rtol=1.0e-4)
iter_ = cc.iter.item()
v_hat_t = m2_ref / (1 - beta2 ** iter_)
v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
m_hat_t = m1_ref / (1 - beta1 ** iter_)
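                # Bias-corrected Adam moments; PARTIAL_ROWWISE_ADAM keeps a
                # single second-moment value per embedding row.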
weights_new = split_weights[t]
weights_ref = (
torch.addcdiv(
bs[t].weight.cpu(),
value=-lr,
tensor1=m_hat_t,
tensor2=v_hat_t.sqrt_().add_(eps),
)
- lr * weight_decay * bs[t].weight.cpu()
)
torch.testing.assert_close(
weights_new.index_select(dim=0, index=x[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=x[t].view(-1).cpu()),
atol=1.0e-3,
rtol=1.0e-3,
)
if optimizer in (OptimType.PARTIAL_ROWWISE_LAMB, OptimType.LAMB):
rowwise = optimizer == OptimType.PARTIAL_ROWWISE_LAMB
for t in range(T):
(m1, m2) = split_optimizer_states[t]
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
m2_ref = (
dense_cpu_grad.pow(2)
if not rowwise
else dense_cpu_grad.pow(2).mean(dim=1)
) * (1.0 - beta2)
torch.testing.assert_close(m2.cpu(), m2_ref, atol=1.0e-4, rtol=1.0e-4)
m1_ref = dense_cpu_grad * (1.0 - beta1)
torch.testing.assert_close(m1.cpu(), m1_ref, atol=1.0e-4, rtol=1.0e-4)
iter_ = cc.iter.item()
v_hat_t = m2_ref / (1 - beta2 ** iter_)
v_hat_t = v_hat_t if not rowwise else v_hat_t.view(v_hat_t.numel(), 1)
m_hat_t = m1_ref / (1 - beta1 ** iter_)
rtw = (m_hat_t / (torch.sqrt(v_hat_t) + eps)) + weight_decay * bs[
t
].weight.cpu()
true_ratio = torch.linalg.norm(bs[t].weight, dim=1, ord=2).view(
m1.shape[0], 1
).cpu() / torch.linalg.norm(rtw, dim=1, ord=2).view(m1.shape[0], 1)
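                # LAMB trust ratio: per-row ||w|| / ||update||; the reference
                # step below moves the weights by lr * ratio * update.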
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - lr * true_ratio * rtw
torch.testing.assert_close(
weights_new.index_select(dim=0, index=x[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=x[t].view(-1).cpu()),
atol=1.0e-3,
rtol=1.0e-3,
)
if optimizer == OptimType.LARS_SGD:
for t in range(T):
(m1,) = split_optimizer_states[t]
weight_norm = (
torch.linalg.norm(bs[t].weight, dim=1, ord=2)
.view(m1.shape[0], 1)
.cpu()
)
dense_cpu_grad = bs[t].weight.grad.cpu().to_dense()
grad_norm = torch.linalg.norm(dense_cpu_grad, dim=1, ord=2).view(
m1.shape[0], 1
)
adjusted_lr = (
lr * eta * weight_norm / (grad_norm + weight_decay * weight_norm)
)
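                # LARS scales the learning rate per row by
                # eta * ||w|| / (||g|| + weight_decay * ||w||); m1 holds the
                # resulting update that is subtracted from the weights.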
m1_ref = adjusted_lr * (
dense_cpu_grad + weight_decay * bs[t].weight.cpu()
)
torch.testing.assert_close(
m1.index_select(dim=0, index=x[t].view(-1)).cpu(),
# pyre-fixme[16]: `float` has no attribute `index_select`.
m1_ref.index_select(dim=0, index=x[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
weights_new = split_weights[t]
weights_ref = bs[t].weight.cpu() - m1_ref
torch.testing.assert_close(
weights_new.index_select(dim=0, index=x[t].view(-1)).cpu(),
weights_ref.index_select(dim=0, index=x[t].view(-1).cpu()),
atol=1.0e-4,
rtol=1.0e-4,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.ADAM,
OptimType.PARTIAL_ROWWISE_ADAM,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_adam( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
optimizer,
long_segments,
pooling_mode,
use_cpu,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.EXACT_ADAGRAD,
OptimType.EXACT_ROWWISE_ADAGRAD,
OptimType.EXACT_ROWWISE_WEIGHTED_ADAGRAD,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_adagrad( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
optimizer,
long_segments,
pooling_mode,
use_cpu,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.sampled_from(
[
OptimType.LAMB,
OptimType.PARTIAL_ROWWISE_LAMB,
]
),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_lamb( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
optimizer,
long_segments,
pooling_mode,
use_cpu,
)
@given(
T=st.integers(min_value=1, max_value=5),
D=st.integers(min_value=2, max_value=256),
B=st.integers(min_value=1, max_value=128),
log_E=st.integers(min_value=3, max_value=5),
L=st.integers(min_value=0, max_value=20),
weighted=st.booleans(),
mixed=st.booleans(),
optimizer=st.just(OptimType.LARS_SGD),
long_segments=st.booleans(),
pooling_mode=st.sampled_from(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
suppress_health_check=[HealthCheck.filter_too_much, HealthCheck.data_too_large],
)
@unittest.skipIf(*gpu_unavailable)
def test_backward_optimizers_lars( # noqa C901
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
optimizer: OptimType,
long_segments: bool,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
use_cpu: bool,
) -> None:
self.execute_backward_optimizers_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
optimizer,
long_segments,
pooling_mode,
use_cpu,
)
def execute_nbit_forward_(
self,
T: int,
D: int,
B: int,
log_E: int,
L: int,
weighted: bool,
mixed: bool,
pooling_mode: split_table_batched_embeddings_ops.PoolingMode,
weights_ty: SparseType,
use_cache: bool,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
use_cpu: bool,
use_array_for_index_remapping: bool,
mixed_weights_ty: bool,
output_dtype: SparseType,
) -> None:
# NOTE: weighted operation can be done only for SUM.
assume(
pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM
or not weighted
)
        # NOTE: No-bag (PoolingMode.NONE) ops only work on GPUs and do not
        # support mixed table dimensions.
assume(
not use_cpu
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
assume(
not mixed
or pooling_mode != split_table_batched_embeddings_ops.PoolingMode.NONE
)
mode = "sum"
do_pooling = True
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
mode = "sum"
elif pooling_mode == split_table_batched_embeddings_ops.PoolingMode.MEAN:
mode = "mean"
else:
mode = "sum"
do_pooling = False
E = int(10 ** log_E)
if not mixed_weights_ty:
weights_ty_list = [weights_ty] * T
else:
weights_ty_list = [
np.random.choice(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
)
for _ in range(T)
]
D_alignment = max(
1 if ty.bit_rate() % 8 == 0 else int(8 / ty.bit_rate())
for ty in weights_ty_list
)
D = round_up(D, D_alignment)
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Ds = [min(D, 128) for D in Ds]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
if do_pooling:
bs = [
to_device(torch.nn.EmbeddingBag(E, D, mode=mode, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
else:
bs = [
to_device(torch.nn.Embedding(E, D, sparse=True), use_cpu)
for (E, D) in zip(Es, Ds)
]
if use_cpu:
managed = [split_table_batched_embeddings_ops.EmbeddingLocation.HOST] * T
elif use_cache:
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING,
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
else:
managed = [
np.random.choice(
[
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED,
]
)
for _ in range(T)
]
xs = [to_device(torch.randint(low=0, high=e, size=(B, L)), use_cpu) for e in Es]
xws = [to_device(torch.randn(size=(B, L)), use_cpu) for _ in range(T)]
xws_acc_type = copy.deepcopy(xws)
cc = split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
W_TY,
split_table_batched_embeddings_ops.EmbeddingLocation(M),
)
for (E, D, M, W_TY) in zip(Es, Ds, managed, weights_ty_list)
],
pooling_mode=pooling_mode,
index_remapping=[torch.arange(E, dtype=torch.int32) for E in Es]
if B != 0
else None,
device="cpu" if use_cpu else torch.cuda.current_device(),
cache_algorithm=cache_algorithm,
use_array_for_index_remapping=use_array_for_index_remapping,
output_dtype=output_dtype,
)
        # Initialize random weights for the int n-bit table batched embedding bag.
cc.fill_random_weights()
        # NOTE: check that the module is TorchScript-compatible by scripting it.
cc = torch.jit.script(cc)
for t in range(T):
(weights, scale_shift) = cc.split_embedding_weights()[t]
if scale_shift is not None:
(E, R) = scale_shift.shape
self.assertEqual(R, 4)
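                # scale_shift packs a per-row fp16 (scale, shift) pair into
                # 4 uint8 bytes.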
if weights_ty_list[t] == SparseType.INT2:
scales = np.random.uniform(0.1, 1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT4:
scales = np.random.uniform(0.01, 0.1, size=(E,)).astype(np.float16)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
if weights_ty_list[t] == SparseType.INT8:
scales = np.random.uniform(0.001, 0.01, size=(E,)).astype(
np.float16
)
shifts = np.random.uniform(-2, 2, size=(E,)).astype(np.float16)
scale_shift[:, :] = torch.tensor(
np.stack([scales, shifts], axis=1).astype(np.float16).view(np.uint8)
)
for t in range(T):
(weights, scale_shift) = cc.split_embedding_weights()[t]
np_weights = weights.contiguous().cpu().numpy()
if scale_shift is not None:
scale_shift: np.ndarray = (
scale_shift.cpu()
.contiguous()
.numpy()
.view(np.float16)
.astype(np.float32)
)
if weights_ty_list[t] == SparseType.INT4:
(E, D_2) = np_weights.shape
D = D_2 * 2
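                # INT4 rows pack two 4-bit values per byte; comp(i) extracts
                # nibble i and dequantizes it with the per-row scale and shift.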
def comp(i: int) -> np.ndarray:
subs = np_weights.view(np.uint8) >> (i * 4)
sub_mask = subs & 0xF
result = sub_mask.astype(np.float32) * scale_shift[:, 0].reshape(
-1, 1
).astype(np.float32) + scale_shift[:, 1].reshape(-1, 1).astype(
np.float32
)
return result.astype(np.float32)
comps = [comp(i) for i in range(2)]
comps = np.stack(comps)
comps = comps.transpose(1, 2, 0)
comps = comps.reshape(E, D)
bs[t].weight.detach().copy_(to_device(torch.tensor(comps), use_cpu))
elif weights_ty_list[t] == SparseType.INT2:
(E, D_4) = np_weights.shape
D = D_4 * 4
# pyre-fixme[53]: Captured variable `scale_shift` is not annotated.
# pyre-fixme[53]: Captured variable `weights` is not annotated.
def comp(i: int) -> np.ndarray:
subs = np_weights.view(np.uint8) >> (i * 2)
sub_mask = subs & 0x3
result = sub_mask.astype(np.float32) * scale_shift[:, 0].reshape(
-1, 1
).astype(np.float32) + scale_shift[:, 1].reshape(-1, 1).astype(
np.float32
)
return result.astype(np.float32)
comps = [comp(i) for i in range(4)]
comps = np.stack(comps)
comps = comps.transpose(1, 2, 0)
comps = comps.reshape(E, D)
bs[t].weight.detach().copy_(to_device(torch.tensor(comps), use_cpu))
elif weights_ty_list[t] == SparseType.INT8:
(E, D) = np_weights.shape
# pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
comps = np_weights.astype(np.float32) * scale_shift[:, 0].reshape(
-1, 1
).astype(np.float32) + scale_shift[:, 1].reshape(-1, 1).astype(
np.float32
)
bs[t].weight.detach().copy_(to_device(torch.tensor(comps), use_cpu))
elif weights_ty_list[t] == SparseType.FP16:
comps = bs[t].weight.detach().half().cpu().numpy().view(np.uint8)
weights.copy_(torch.tensor(comps))
elif weights_ty_list[t] == SparseType.FP32:
comps = bs[t].weight.detach().float().cpu().numpy().view(np.uint8)
weights.copy_(torch.tensor(comps))
x = torch.cat([x.view(1, B, L) for x in xs], dim=0)
xw = torch.cat([xw.view(1, B, L) for xw in xws_acc_type], dim=0)
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu)
if not use_cpu:
fc2 = (
cc(indices.int(), offsets.int())
if not weighted
else cc(indices.int(), offsets.int(), xw.contiguous().view(-1))
)
else:
cc = cc.cpu()
indices, offsets = indices.cpu(), offsets.cpu()
fc2 = (
cc(indices.int(), offsets.int())
if not weighted
else cc(indices.int(), offsets.int(), xw.contiguous().view(-1).cpu())
)
if do_pooling and B == 0:
self.assertEqual(fc2.size(), (0, cc.total_D))
return
fs = (
[
b_indices(b, x, use_cpu=use_cpu, do_pooling=do_pooling)
for (b, x) in zip(bs, xs)
]
if not weighted
else [
b_indices(
b,
x,
per_sample_weights=xw.view(-1),
use_cpu=use_cpu,
do_pooling=do_pooling,
)
for (b, x, xw) in zip(bs, xs, xws)
]
)
if do_pooling:
f = torch.cat([f.view(B, -1) for f in fs], dim=1)
else:
f = torch.cat(fs, dim=0).view(-1, D)
torch.testing.assert_close(
fc2.float().cpu(),
f.float().cpu(),
atol=1.0e-2,
rtol=1.0e-2,
)
@given(
nbit_weights_ty=get_nbit_weights_ty(),
use_array_for_index_remapping=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
)
def test_nbit_forward_cpu(
self,
nbit_weights_ty: Optional[SparseType],
use_array_for_index_remapping: bool,
) -> None:
use_cpu = True
T = random.randint(1, 50)
B = random.randint(0, 128)
L = random.randint(0, 32)
D = random.randint(2, 1024)
log_E = random.randint(2, 4)
use_cache = False
        # cache_algorithm is a don't-care since the cache is not used.
cache_algorithm = split_table_batched_embeddings_ops.CacheAlgorithm.LRU
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
]
)
mixed = random.choice([True, False])
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
if nbit_weights_ty is None:
            # weights_ty is a don't-care when mixed weight types are used.
weights_ty: SparseType = SparseType.INT8
mixed_weights_ty = True
else:
weights_ty: SparseType = nbit_weights_ty
mixed_weights_ty = False
output_dtype = random.choice([SparseType.FP32, SparseType.FP16])
self.execute_nbit_forward_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
pooling_mode,
weights_ty,
use_cache,
cache_algorithm,
use_cpu,
use_array_for_index_remapping,
mixed_weights_ty,
output_dtype,
)
@unittest.skipIf(*gpu_unavailable)
@given(
nbit_weights_ty=get_nbit_weights_ty(),
use_array_for_index_remapping=st.booleans(),
)
@settings(
verbosity=Verbosity.verbose,
max_examples=MAX_EXAMPLES_LONG_RUNNING,
deadline=None,
)
def test_nbit_forward_gpu_no_cache(
self,
nbit_weights_ty: Optional[SparseType],
use_array_for_index_remapping: bool,
) -> None:
use_cpu = False
T = random.randint(1, 50)
B = random.randint(0, 128)
L = random.randint(0, 32)
D = random.randint(2, 1024)
log_E = random.randint(2, 4)
use_cache = False
        # cache_algorithm is a don't-care since the cache is not used.
cache_algorithm = split_table_batched_embeddings_ops.CacheAlgorithm.LRU
pooling_mode = random.choice(
[
split_table_batched_embeddings_ops.PoolingMode.SUM,
split_table_batched_embeddings_ops.PoolingMode.MEAN,
split_table_batched_embeddings_ops.PoolingMode.NONE,
]
)
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.NONE:
mixed = False
else:
mixed = random.choice([True, False])
if pooling_mode == split_table_batched_embeddings_ops.PoolingMode.SUM:
weighted = random.choice([True, False])
else:
weighted = False
if nbit_weights_ty is None:
            # weights_ty is a don't-care when mixed weight types are used.
weights_ty: SparseType = SparseType.INT8
mixed_weights_ty = True
else:
weights_ty: SparseType = nbit_weights_ty
mixed_weights_ty = False
output_dtype = random.choice([SparseType.FP32, SparseType.FP16])
self.execute_nbit_forward_(
T,
D,
B,
log_E,
L,
weighted,
mixed,
pooling_mode,
weights_ty,
use_cache,
cache_algorithm,
use_cpu,
use_array_for_index_remapping,
mixed_weights_ty,
output_dtype,
)
@unittest.skipIf(*gpu_unavailable)
@given(
weights_ty=st.sampled_from(
[
SparseType.FP32,
SparseType.FP16,
SparseType.INT8,
SparseType.INT4,
SparseType.INT2,
]
),
cache_algorithm=st.sampled_from(
split_table_batched_embeddings_ops.CacheAlgorithm
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_forward_uvm_cache(
self,
weights_ty: SparseType,
cache_algorithm: split_table_batched_embeddings_ops.CacheAlgorithm,
) -> None:
T = random.randint(1, 5)
B = random.randint(1, 128)
L = random.randint(1, 20)
D = random.randint(2, 256)
log_E = random.randint(3, 5)
mixed = random.choice([True, False])
iters = 3
E = int(10 ** log_E)
D_alignment = (
1 if weights_ty.bit_rate() % 8 == 0 else int(8 / weights_ty.bit_rate())
)
D = round_up(D, D_alignment)
if not mixed:
Ds = [D] * T
Es = [E] * T
else:
Ds = [
round_up(
np.random.randint(low=int(max(0.25 * D, 1)), high=int(1.0 * D)),
D_alignment,
)
for _ in range(T)
]
Es = [
np.random.randint(low=int(0.5 * E), high=int(2.0 * E)) for _ in range(T)
]
managed = [
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING
] * T
if mixed:
average_D = sum(Ds) // T
for t, d in enumerate(Ds):
managed[t] = (
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
if d < average_D
else managed[t]
)
cc_ref = (
split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
[
(
"",
E,
D,
weights_ty,
split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE,
)
for (E, D) in zip(Es, Ds)
],
)
)
cc_ref.fill_random_weights()
cc = split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
[("", E, D, weights_ty, M) for (E, D, M) in zip(Es, Ds, managed)],
cache_algorithm=cache_algorithm,
)
cc.fill_random_weights()
split_weights = cc.split_embedding_weights()
ref_split_weights = cc_ref.split_embedding_weights()
for t in range(T):
(weights, scale_shift) = split_weights[t]
(ref_weights, ref_scale_shift) = ref_split_weights[t]
self.assertEqual(weights.size(), ref_weights.size())
weights.copy_(ref_weights)
if ref_scale_shift is not None:
scale_shift.copy_(ref_scale_shift)
requests = generate_requests(iters, B, T, L, min(Es), reuse=0.1)
for indices, offsets, _ in requests:
indices = indices.int()
offsets = offsets.int()
output = cc(indices, offsets)
output_ref = cc_ref(indices, offsets)
torch.testing.assert_close(output, output_ref, equal_nan=True)
@given(
T=st.integers(min_value=1, max_value=5),
B=st.integers(min_value=1, max_value=8),
L=st.integers(min_value=0, max_value=8),
use_cpu=st.booleans() if gpu_available else st.just(True),
use_cpu_hashtable=st.booleans(),
use_array_for_index_remapping=st.booleans(),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_pruning(
self,
T: int,
B: int,
L: int,
use_cpu: bool,
use_cpu_hashtable: bool,
use_array_for_index_remapping: bool,
) -> None:
E = int(1000)
LOAD_FACTOR = 0.8
pruning_ratio = 0.5
capacities = [int(B * L / LOAD_FACTOR) + 1 for _ in range(T)]
original_E = int(E / (1.0 - pruning_ratio))
        # Ensure original_E is large enough relative to B * L so that unique
        # indices can be drawn for each table.
assume(original_E > B * L)
current_device = "cpu" if use_cpu else torch.cuda.current_device()
if use_cpu_hashtable:
assume(use_cpu)
indices = torch.randint(low=0, high=original_E, size=(T, B, L))
for t in range(T):
while (
torch.unique(
indices[t], return_counts=False, return_inverse=False
).numel()
!= indices[t].numel()
):
indices[t] = torch.randint(low=0, high=original_E, size=(B, L))
indices = indices.view(-1).int()
dense_indices = torch.randint(low=0, high=E, size=(T, B, L)).view(-1).int()
offsets = torch.tensor([L * b_t for b_t in range(B * T + 1)]).int()
# Initialize and insert Hashmap index remapping based data structure
hash_table = torch.empty(
(sum(capacities), 2),
dtype=torch.int32,
)
hash_table[:, :] = -1
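        # The hash table has one row of two int32 values per slot; -1 marks an
        # empty slot before insertion.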
hash_table_offsets = torch.tensor([0] + np.cumsum(capacities).tolist()).long()
torch.ops.fbgemm.pruned_hashmap_insert(
indices, dense_indices, offsets, hash_table, hash_table_offsets
)
if use_cpu_hashtable:
ht = torch.classes.fb.PrunedMapCPU()
ht.insert(indices, dense_indices, offsets, T)
# Initialize and insert Array index remapping based data structure
index_remappings_array = torch.tensor(
[-1] * original_E * T, dtype=torch.int32, device=current_device
)
index_remappings_array_offsets = torch.empty(
T + 1, dtype=torch.int64, device=current_device
)
index_remappings_array_offsets[0] = 0
for t in range(T):
indice_t = (indices.view(T, B, L))[t].long().view(-1).to(current_device)
dense_indice_t = (
(dense_indices.view(T, B, L))[t].view(-1).to(current_device)
)
selected_indices = torch.add(indice_t, t * original_E)[:E]
index_remappings_array[selected_indices] = dense_indice_t
index_remappings_array_offsets[t + 1] = (
index_remappings_array_offsets[t] + original_E
)
# Move data when using device
if not use_cpu:
(
indices,
dense_indices,
offsets,
hash_table,
hash_table_offsets,
index_remappings_array,
index_remappings_array_offsets,
) = (
indices.to(current_device),
dense_indices.to(current_device),
offsets.to(current_device),
hash_table.to(current_device),
hash_table_offsets.to(current_device),
index_remappings_array.to(current_device),
index_remappings_array_offsets.to(current_device),
)
# Lookup
if use_cpu_hashtable:
dense_indices_ = ht.lookup(indices, offsets)
elif not use_array_for_index_remapping: # hashmap based pruning
dense_indices_ = torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
)
else: # array based pruning
dense_indices_ = torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
index_remappings_array,
index_remappings_array_offsets,
)
# Validate the lookup result
torch.testing.assert_close(dense_indices, dense_indices_)
        # For array-based pruning, arbitrarily large indices would be out of
        # bounds; we rely on the bounds checker to keep indices within range.
if not use_array_for_index_remapping:
# now, use a value that does not exist in the original set of indices
# and so should be pruned out.
indices[:] = np.iinfo(np.int32).max
if use_cpu_hashtable:
dense_indices_ = ht.lookup(indices, offsets)
elif not use_array_for_index_remapping: # hashmap based pruning
dense_indices_ = torch.ops.fbgemm.pruned_hashmap_lookup(
indices, offsets, hash_table, hash_table_offsets
)
else: # array based pruning
dense_indices_ = torch.ops.fbgemm.pruned_array_lookup(
indices,
offsets,
index_remappings_array,
index_remappings_array_offsets,
)
torch.testing.assert_close(dense_indices.clone().fill_(-1), dense_indices_)
@given(
L=st.integers(min_value=0, max_value=16),
H=st.integers(min_value=512, max_value=1024),
S=st.integers(min_value=0, max_value=128),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_update_function(self, L: int, H: int, S: int) -> None:
# Generate synthetic data
linear_cache_indices_cpu = torch.randint(L, H, (S,))
lxu_cache_locations_cpu = torch.clone(linear_cache_indices_cpu)
indices = [True if np.random.rand() < 0.5 else False for _ in range(S)]
lxu_cache_locations_cpu[indices] = -1
cache_miss_ids = torch.clone(linear_cache_indices_cpu)
cache_miss_ids[lxu_cache_locations_cpu != -1] = -2
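        # -1 in lxu_cache_locations marks a cache miss; cache_miss_ids keeps
        # only the missed indices (-2 marks hits) so unique misses can be
        # counted below.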
# Calculate the correct output
unique_cache_miss_ids = torch.unique(cache_miss_ids)
expect_out = sum(unique_cache_miss_ids >= 0)
linear_cache_indices = to_device(
torch.tensor(linear_cache_indices_cpu, dtype=torch.int64), use_cpu=False
)
lxu_cache_locations = to_device(
torch.tensor(lxu_cache_locations_cpu, dtype=torch.int32), use_cpu=False
)
# Create an abstract split table
D = 8
T = 2
E = 10 ** 3
Ds = [D] * T
Es = [E] * T
emb_op = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen
)
cc = emb_op(
embedding_specs=[
(
E,
D,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING,
split_table_batched_embeddings_ops.ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
record_cache_metrics=RecordCacheMetrics(True, False),
)
cc._update_cache_miss_counter(lxu_cache_locations, linear_cache_indices)
(
cache_miss_forward_count,
unique_cache_miss_count,
) = cc.get_cache_miss_counter().cpu()
self.assertEqual(unique_cache_miss_count, expect_out)
self.assertLessEqual(cache_miss_forward_count, unique_cache_miss_count)
@given(N=st.integers(min_value=1, max_value=8))
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_cache_miss_counter(self, N: int) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10 ** 3
Ds = [D] * T
Es = [E] * T
emb_op = (
split_table_batched_embeddings_ops.SplitTableBatchedEmbeddingBagsCodegen
)
cc = emb_op(
embedding_specs=[
(
E,
D,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING,
split_table_batched_embeddings_ops.ComputeDevice.CUDA,
)
for (E, D) in zip(Es, Ds)
],
record_cache_metrics=RecordCacheMetrics(True, True),
)
# Create fake input data and the target output
xs = []
x1 = torch.Tensor([[[1], [1]], [[3], [4]]])
x1 = to_device(torch.tensor(x1, dtype=torch.int64), use_cpu=False)
x2 = torch.Tensor([[[2], [1]], [[3], [4]]])
x2 = to_device(torch.tensor(x2, dtype=torch.int64), use_cpu=False)
x3 = torch.Tensor([[[5], [6]], [[7], [8]]])
x3 = to_device(torch.tensor(x3, dtype=torch.int64), use_cpu=False)
xs.append(x1)
xs.append(x2)
xs.append(x3)
target_counter_list = [[1, 3], [2, 4], [3, 8]]
target_tablewise_cache_miss_list = [[1, 2], [2, 2], [4, 4]]
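        # Expected (cache_miss_forward_count, unique_cache_miss_count) and
        # per-table miss counts after each input; the counters accumulate
        # across inputs.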
for x, t_counter, t_tablewise_cache_miss in zip(
xs, target_counter_list, target_tablewise_cache_miss_list
):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc(indices, offsets)
(
cache_miss_forward_count,
unique_cache_miss_count,
) = cc.get_cache_miss_counter().cpu()
tablewise_cache_miss = cc.get_table_wise_cache_miss().cpu()
self.assertEqual(cache_miss_forward_count, t_counter[0])
self.assertEqual(unique_cache_miss_count, t_counter[1])
for i in range(len(tablewise_cache_miss)):
self.assertEqual(tablewise_cache_miss[i], t_tablewise_cache_miss[i])
@unittest.skipIf(*gpu_unavailable)
@given(
L=st.integers(min_value=0, max_value=16),
H=st.integers(min_value=512, max_value=1024),
S=st.integers(min_value=0, max_value=128),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_cache_update_function(self, L: int, H: int, S: int) -> None:
# Generate synthetic data
linear_cache_indices_cpu = torch.randint(L, H, (S,))
lxu_cache_locations_cpu = torch.clone(linear_cache_indices_cpu)
indices = [True if np.random.rand() < 0.5 else False for _ in range(S)]
lxu_cache_locations_cpu[indices] = -1
cache_miss_ids = torch.clone(linear_cache_indices_cpu)
cache_miss_ids[lxu_cache_locations_cpu != -1] = -2
# Calculate the correct output
unique_cache_miss_ids = torch.unique(cache_miss_ids)
expect_out = sum(unique_cache_miss_ids >= 0)
linear_cache_indices = linear_cache_indices_cpu.to(torch.int32).cuda()
lxu_cache_locations = lxu_cache_locations_cpu.to(torch.int32).cuda()
# Create an abstract split table
D = 8
T = 2
E = 10 ** 3
Ds = [D] * T
Es = [E] * T
cc = split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
record_cache_metrics=RecordCacheMetrics(True, False),
)
cc.fill_random_weights()
cc._update_cache_miss_counter(lxu_cache_locations, linear_cache_indices)
(
cache_miss_forward_count,
unique_cache_miss_count,
) = cc.get_cache_miss_counter().cpu()
self.assertEqual(unique_cache_miss_count, expect_out)
self.assertLessEqual(cache_miss_forward_count, unique_cache_miss_count)
@unittest.skipIf(*gpu_unavailable)
@given(N=st.integers(min_value=1, max_value=8))
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_nbit_cache_miss_counter(self, N: int) -> None:
# Create an abstract split table
D = 8
T = 2
E = 10 ** 3
Ds = [D] * T
Es = [E] * T
cc = split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
"",
E,
D,
SparseType.INT8,
split_table_batched_embeddings_ops.EmbeddingLocation.MANAGED_CACHING,
)
for (E, D) in zip(Es, Ds)
],
device=torch.cuda.current_device(),
record_cache_metrics=RecordCacheMetrics(True, True),
)
cc.fill_random_weights()
# Create fake input data and the target output
x1 = torch.Tensor([[[1], [1]], [[3], [4]]]).cuda()
x2 = torch.Tensor([[[2], [1]], [[3], [4]]]).cuda()
x3 = torch.Tensor([[[5], [6]], [[7], [8]]]).cuda()
xs = [x1, x2, x3]
target_counter_list = [[1, 3], [2, 4], [3, 8]]
target_tablewise_cache_miss_list = [[1, 2], [2, 2], [4, 4]]
for x, t_counter, t_tablewise_cache_miss in zip(
xs, target_counter_list, target_tablewise_cache_miss_list
):
(indices, offsets) = get_table_batched_offsets_from_dense(x, use_cpu=False)
for _ in range(N):
cc(indices.int(), offsets.int())
(
cache_miss_forward_count,
unique_cache_miss_count,
) = cc.get_cache_miss_counter().cpu()
tablewise_cache_miss = cc.get_table_wise_cache_miss().cpu()
self.assertEqual(cache_miss_forward_count, t_counter[0])
self.assertEqual(unique_cache_miss_count, t_counter[1])
for i in range(len(tablewise_cache_miss)):
self.assertEqual(tablewise_cache_miss[i], t_tablewise_cache_miss[i])
@given(
T=st.integers(min_value=1, max_value=64),
B=st.integers(min_value=1, max_value=64),
max_L=st.integers(min_value=1, max_value=64),
bounds_check_mode=st.sampled_from(
[
BoundsCheckMode.FATAL,
BoundsCheckMode.WARNING,
BoundsCheckMode.IGNORE,
]
),
use_cpu=st.booleans() if gpu_available else st.just(True),
dtype=st.sampled_from(
[
torch.int64,
torch.int32,
]
),
)
@settings(verbosity=Verbosity.verbose, max_examples=MAX_EXAMPLES, deadline=None)
def test_bounds_check(
self,
T: int,
B: int,
max_L: int,
bounds_check_mode: BoundsCheckMode,
use_cpu: bool,
dtype: torch.dtype,
) -> None:
rows_per_table = torch.tensor(
np.random.randint(low=1, high=1000, size=(T,))
).long()
Ls = np.random.randint(low=0, high=max_L, size=(T, B))
indices = [
np.random.randint(low=0, high=rows_per_table[t], size=Ls[t, b])
for t in range(T)
for b in range(B)
]
indices = torch.tensor(np.concatenate(indices, axis=0)).to(dtype)
offsets = torch.tensor([0] + np.cumsum(Ls.flatten()).tolist()).to(dtype)
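        # Build ragged per-(table, batch) index lists and flatten them with
        # prefix-sum offsets.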
warning = torch.tensor([0]).long()
self.assertEqual(indices.numel(), np.sum(Ls).item())
self.assertEqual(offsets[-1], np.sum(Ls).item())
if not use_cpu:
indices, offsets, rows_per_table, warning = (
indices.cuda(),
offsets.cuda(),
rows_per_table.cuda(),
warning.cuda(),
)
indices_copy = indices.clone()
torch.ops.fbgemm.bounds_check_indices(
rows_per_table, indices, offsets, bounds_check_mode, warning
)
        # Indices are not modified when they are in-bounds.
torch.testing.assert_close(indices_copy, indices)
indices[:] = torch.iinfo(dtype).max
if bounds_check_mode != BoundsCheckMode.FATAL:
torch.ops.fbgemm.bounds_check_indices(
rows_per_table, indices, offsets, bounds_check_mode, warning
)
torch.testing.assert_close(indices, torch.zeros_like(indices))
if bounds_check_mode == BoundsCheckMode.WARNING:
self.assertEqual(warning.item(), indices.numel())
else:
if use_cpu and indices.numel():
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table, indices, offsets, bounds_check_mode, warning
)
# It would be nice to test the CUDA implementation of BoundsCheckMode==FATAL,
# but the device assert kills the CUDA context and requires a process restart,
# which is a bit inconvenient.
# test offsets bound errors
indices = indices_copy.clone()
if offsets.numel() > 0:
offsets[0] = -100
if offsets.numel() > 1:
offsets[-1] += 100
if bounds_check_mode != BoundsCheckMode.FATAL:
torch.ops.fbgemm.bounds_check_indices(
rows_per_table, indices, offsets, bounds_check_mode, warning
)
if offsets.numel() > 0:
self.assertEqual(offsets[0].item(), 0)
if offsets.numel() > 1:
self.assertEqual(offsets[-1].item(), indices.numel())
if bounds_check_mode == BoundsCheckMode.WARNING:
# -1 because when we have 2 elements in offsets, we have only 1
# warning for the pair.
self.assertGreaterEqual(warning.item(), min(2, offsets.numel() - 1))
else:
if use_cpu and indices.numel():
with self.assertRaises(RuntimeError):
torch.ops.fbgemm.bounds_check_indices(
rows_per_table, indices, offsets, bounds_check_mode, warning
)
def test_pickle(self) -> None:
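        # Smoke test: TensorQueue should round-trip through pickle without
        # raising.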
tensor_queue = torch.classes.fbgemm.TensorQueue(torch.empty(0))
pickled = pickle.dumps(tensor_queue)
unpickled = pickle.loads(pickled)
@unittest.skipIf(*gpu_unavailable)
def test_linearize_cache_indices(self) -> None:
indices = torch.tensor(
[10, 2, 3, 7, 1, 4, 5, 9, 2, 7, 6, 8, 5, 1, 0, 4],
dtype=torch.int,
device="cuda",
)
equal_offsets = torch.tensor([0, 4, 8, 12, 16], dtype=torch.int, device="cuda")
varying_offsets = torch.tensor(
[0, 1, 3, 6, 8, 10, 14, 15, 16], dtype=torch.int, device="cuda"
)
# Testing equal sized tables.
cache_hash_size_cumsum_0 = torch.tensor([0, 12, 24, 36, 48]).cuda()
linear_cache_indices_0 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_0, indices, equal_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_0.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 26, 31, 30, 32, 41, 37, 36, 40],
dtype=torch.int,
),
)
)
# Testing partially cached tables.
cache_hash_size_cumsum_1 = torch.tensor([0, 12, -1, 24, 36]).cuda()
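        # A -1 entry in the cumsum marks an uncached table; its indices map to
        # the sentinel equal to the total cached size (36 here).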
linear_cache_indices_1 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_1, indices, equal_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_1.cpu(),
torch.tensor(
[10, 2, 3, 7, 13, 16, 17, 21, 36, 36, 36, 36, 29, 25, 24, 28],
dtype=torch.int,
),
)
)
# Testing batched with varying pooling factor.
cache_hash_size_cumsum_2 = torch.tensor([0, 12, -1, 24, 36]).cuda()
linear_cache_indices_2 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_2, indices, varying_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_2.cpu(),
torch.tensor(
[10, 2, 3, 19, 13, 16, 17, 21, 36, 36, 36, 36, 36, 36, 24, 28],
dtype=torch.int,
),
)
)
# Testing when multiple features share the same table.
cache_hash_size_cumsum_3 = torch.tensor([0, 0, 12, 12, 24]).cuda()
linear_cache_indices_3 = torch.ops.fbgemm.linearize_cache_indices(
cache_hash_size_cumsum_3, indices, varying_offsets
)
self.assertTrue(
torch.equal(
linear_cache_indices_3.cpu(),
torch.tensor(
[10, 2, 3, 7, 1, 4, 5, 9, 14, 19, 18, 20, 17, 13, 12, 16],
dtype=torch.int,
),
)
)
@unittest.skipIf(*gpu_unavailable)
def test_lxu_cache_lookup(self) -> None:
ASSOC: int = split_table_batched_embeddings_ops.ASSOC
max_index: int = 8000
        # Use a single cache set to avoid dealing with the cache-set hash algorithm.
lxu_cache_state_gpu = torch.arange(ASSOC, dtype=torch.int64).unsqueeze(0).cuda()
# Testing all miss.
linear_cache_indices_0 = torch.tensor(
[32, 33, 34, 35, 36, 100, 1000, 1725]
).cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_0, lxu_cache_state_gpu, max_index
)
self.assertTrue(
torch.equal(
lxu_locations.cpu(),
torch.tensor(
[-1, -1, -1, -1, -1, -1, -1, -1],
dtype=torch.int,
),
)
)
# Testing all hits.
cache_indices_1 = torch.randint(0, ASSOC, (ASSOC,))
linear_cache_indices_1 = cache_indices_1.cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_1, lxu_cache_state_gpu, max_index
)
self.assertTrue(
torch.equal(
lxu_locations.cpu(),
cache_indices_1.int(),
)
)
# Testing mixture.
miss_cache_indices_0 = torch.randint(ASSOC, max_index, (10,))
hit_cache_indices_0 = torch.randint(0, ASSOC, (8,))
miss_cache_indices_1 = torch.randint(ASSOC, max_index, (16,))
hit_cache_indices_1 = torch.randint(0, ASSOC, (8,))
linear_cache_indices_2 = torch.cat(
[
miss_cache_indices_0,
hit_cache_indices_0,
miss_cache_indices_1,
hit_cache_indices_1,
]
).cuda()
lxu_locations = torch.ops.fbgemm.lxu_cache_lookup(
linear_cache_indices_2, lxu_cache_state_gpu, max_index
)
expected_result = torch.cat(
[
torch.full_like(miss_cache_indices_0, -1),
hit_cache_indices_0,
torch.full_like(miss_cache_indices_1, -1),
hit_cache_indices_1,
]
).int()
self.assertTrue(
torch.equal(
lxu_locations.cpu(),
expected_result,
)
)
if __name__ == "__main__":
unittest.main()
| 35.560168 | 96 | 0.543131 |
efcf839b959ddd8c3aff12ec1e709c1f0a57aba0
| 10,401 |
py
|
Python
|
day_08/main.py
|
tusharsadhwani/aoc2021
|
8780760208e40eb6bc64f0726b5b9c2559df0633
|
[
"MIT"
] | null | null | null |
day_08/main.py
|
tusharsadhwani/aoc2021
|
8780760208e40eb6bc64f0726b5b9c2559df0633
|
[
"MIT"
] | null | null | null |
day_08/main.py
|
tusharsadhwani/aoc2021
|
8780760208e40eb6bc64f0726b5b9c2559df0633
|
[
"MIT"
] | null | null | null |
"""
--- Day 8: Seven Segment Search ---
You barely reach the safety of the cave when the whale smashes into the
cave mouth, collapsing it. Sensors indicate another exit to this cave at
a much greater depth, so you have no choice but to press on.
As your submarine slowly makes its way through the cave system, you
notice that the four-digit seven-segment displays in your submarine are
malfunctioning; they must have been damaged during the escape. You'll be
in a lot of trouble without them, so you'd better figure out what's
wrong.
Each digit of a seven-segment display is rendered by turning on or off
any of seven segments named a through g:
0: 1: 2: 3: 4:
aaaa .... aaaa aaaa ....
b c . c . c . c b c
b c . c . c . c b c
.... .... dddd dddd dddd
e f . f e . . f . f
e f . f e . . f . f
gggg .... gggg gggg ....
5: 6: 7: 8: 9:
aaaa aaaa aaaa aaaa aaaa
b . b . . c b c b c
b . b . . c b c b c
dddd dddd .... dddd dddd
. f e f . f e f . f
. f e f . f e f . f
gggg gggg .... gggg gggg
So, to render a 1, only segments c and f would be turned on; the rest
would be off. To render a 7, only segments a, c, and f would be turned
on.
The problem is that the signals which control the segments have been
mixed up on each display. The submarine is still trying to display
numbers by producing output on signal wires a through g, but those wires
are connected to segments randomly. Worse, the wire/segment connections
are mixed up separately for each four-digit display! (All of the digits
within a display use the same connections, though.)
So, you might know that only signal wires b and g are turned on, but
that doesn't mean segments b and g are turned on: the only digit that
uses two segments is 1, so it must mean segments c and f are meant to be
on. With just that information, you still can't tell which wire (b/g)
goes to which segment (c/f). For that, you'll need to collect more
information.
For each display, you watch the changing signals for a while, make a
note of all ten unique signal patterns you see, and then write down a
single four digit output value (your puzzle input). Using the signal
patterns, you should be able to work out which pattern corresponds to
which digit.
For example, here is what you might see in a single entry in your notes:
acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab |
cdfeb fcadb cdfeb cdbaf
(The entry is wrapped here to two lines so it fits; in your notes, it
will all be on a single line.)
Each entry consists of ten unique signal patterns, a | delimiter, and
finally the four digit output value. Within an entry, the same
wire/segment connections are used (but you don't know what the
connections actually are). The unique signal patterns correspond to the
ten different ways the submarine tries to render a digit using the
current wire/segment connections. Because 7 is the only digit that uses
three segments, dab in the above example means that to render a 7,
signal lines d, a, and b are on. Because 4 is the only digit that uses
four segments, eafb means that to render a 4, signal lines e, a, f, and
b are on.
Using this information, you should be able to work out which combination
of signal wires corresponds to each of the ten digits. Then, you can
decode the four digit output value. Unfortunately, in the above example,
all of the digits in the output value (cdfeb fcadb cdfeb cdbaf) use five
segments and are more difficult to deduce.
For now, focus on the easy digits. Consider this larger example:
be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb |
fdgacbe cefdb cefbgd gcbe
edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec |
fcgedb cgb dgebacf gc
fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef |
cg cg fdcagb cbg
fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega |
efabcd cedba gadfec cb
aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga |
gecf egdcabf bgf bfgea
fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf |
gebdcfa ecba ca fadegcb
dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf |
cefg dcbef fcge gbcadfe
bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd |
ed bcgafe cdgba cbgef
egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg |
gbdfcae bgc cg cgb
gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc |
fgae cfgab fg bagce
Because the digits 1, 4, 7, and 8 each use a unique number of segments,
you should be able to tell which combinations of signals correspond to
those digits. Counting only digits in the output values (the part after
| on each line), in the above example, there are 26 instances of digits
that use a unique number of segments.
In the output values, how many times do digits 1, 4, 7, or 8 appear?
--- Part Two ---
Through a little deduction, you should now be able to determine the
remaining digits. Consider again the first example above:
acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab |
cdfeb fcadb cdfeb cdbaf
After some careful analysis, the mapping between signal wires and
segments only make sense in the following configuration:
dddd
e a
e a
ffff
g b
g b
cccc
So, the unique signal patterns would correspond to the following digits:
- acedgfb: 8
- cdfbe: 5
- gcdfa: 2
- fbcad: 3
- dab: 7
- cefabd: 9
- cdfgeb: 6
- eafb: 4
- cagedb: 0
- ab: 1
Then, the four digits of the output value can be decoded:
- cdfeb: 5
- fcadb: 3
- cdfeb: 5
- cdbaf: 3
Therefore, the output value for this entry is 5353.
Following this same process for each entry in the second, larger example
above, the output value of each entry can be determined:
- fdgacbe cefdb cefbgd gcbe: 8394
- fcgedb cgb dgebacf gc: 9781
- cg cg fdcagb cbg: 1197
- efabcd cedba gadfec cb: 9361
- gecf egdcabf bgf bfgea: 4873
- gebdcfa ecba ca fadegcb: 8418
- cefg dcbef fcge gbcadfe: 4548
- ed bcgafe cdgba cbgef: 1625
- gbdfcae bgc cg cgb: 8717
- fgae cfgab fg bagce: 4315
Adding all of the output values in this larger example produces 61229.
For each entry, determine all of the wire/segment connections and decode
the four-digit output values. What do you get if you add up all of the
output values?
"""
import os
from itertools import permutations
from typing import Sequence
Digit = frozenset[str]
def parse_input(data: str) -> tuple[list[list[Digit]], list[list[Digit]]]:
all_digits: list[list[Digit]] = []
input_digits: list[list[Digit]] = []
for line in data.splitlines():
digits_text, input_text = line.split("|")
all_digits.append([frozenset(segments) for segments in digits_text.split()])
input_digits.append([frozenset(segments) for segments in input_text.split()])
return all_digits, input_digits
def part1(data: str) -> int:
_, input_digits = parse_input(data)
count_1478 = 0
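    # 1, 4, 7 and 8 light up 2, 4, 3 and 7 segments respectively -- the only
    # unique lengths; 5- and 6-segment words (2/3/5 and 0/6/9) stay ambiguous.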
for row in input_digits:
for digits in row:
if len(digits) not in (5, 6):
count_1478 += 1
return count_1478
def get_set_from_word(permutation: Sequence[int], digit: Digit) -> set[int]:
"""
Returns a digit set from a given digit word,
based on the current permutation.
i.e. if:
permutation = [6, 5, 4, 3, 2, 1, 0]
digit = 'abcd'
then output = {6, 5, 4, 3}
"""
return {permutation[ord(char) - ord("a")] for char in digit}
def part2(data: str) -> int:
"""
Assuming the seven segment display in this order:
0000
1 2
1 2
3333
4 5
4 5
6666
The digits are defined by the activated segments.
    The idea is to generate all possible permutations of a-to-g wire
    mappings onto these 0-6 segment indices, and then validate the words
against them until we find one in which none of the given words
generate an invalid number.
"""
digit_sets = (
{0, 1, 2, 4, 5, 6}, # Zero
{2, 5}, # One
{0, 2, 3, 4, 6}, # Two
{0, 2, 3, 5, 6}, # Three
{1, 2, 3, 5}, # Four
{0, 1, 3, 5, 6}, # Five
{0, 1, 3, 4, 5, 6}, # Six
{0, 2, 5}, # Seven
{0, 1, 2, 3, 4, 5, 6}, # Eight
{0, 1, 2, 3, 5, 6}, # Nine
)
total = 0
all_digits, input_digits = parse_input(data)
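    # Brute force: try each of the 7! = 5040 wire-to-segment permutations per row
    # and keep the first one under which every observed word maps to a valid digit.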
for all_digit_row, input_row in zip(all_digits, input_digits):
for permutation in permutations(range(7), 7):
for digit_word in all_digit_row:
digit_set = get_set_from_word(permutation, digit_word)
if digit_set not in digit_sets:
# Invalid digit found. On to the next permutation.
break
else:
number = 0
for digit_word in input_row:
digit_set = get_set_from_word(permutation, digit_word)
digit = digit_sets.index(digit_set)
number = number * 10 + digit
total += number
break # Found the number. Onto the next row.
return total
test_data = """\
be cfbegad cbdgef fgaecd cgeb fdcge agebfd fecdb fabcd edb | fdgacbe cefdb cefbgd gcbe
edbfga begcd cbg gc gcadebf fbgde acbgfd abcde gfcbed gfec | fcgedb cgb dgebacf gc
fgaebd cg bdaec gdafb agbcfd gdcbef bgcad gfac gcb cdgabef | cg cg fdcagb cbg
fbegcd cbd adcefb dageb afcb bc aefdc ecdab fgdeca fcdbega | efabcd cedba gadfec cb
aecbfdg fbg gf bafeg dbefa fcge gcbea fcaegb dgceab fcbdga | gecf egdcabf bgf bfgea
fgeab ca afcebg bdacfeg cfaedg gcfdb baec bfadeg bafgc acf | gebdcfa ecba ca fadegcb
dbcfg fgd bdegcaf fgec aegbdf ecdfab fbedc dacgb gdcebf gf | cefg dcbef fcge gbcadfe
bdfegc cbegaf gecbf dfcage bdacg ed bedf ced adcbefg gebcd | ed bcgafe cdgba cbgef
egadfb cdbfeg cegd fecab cgb gbdefca cg fgcdab egfdb bfceg | gbdfcae bgc cg cgb
gcafb gcf dcaebfg ecagb gf abcdeg gaef cafbge fdbac fegbdc | fgae cfgab fg bagce
"""
def test_part1() -> None:
assert part1(test_data) == 26
def test_part2() -> None:
assert part2(test_data) == 61229
def main() -> None:
with open(os.path.join(os.path.dirname(__file__), "input")) as file:
data = file.read()
print(part1(data))
print(part2(data))
if __name__ == "__main__":
main()
| 34.213816 | 86 | 0.680511 |
f7a579f1ca176702a9824450d79c0bd2e1cbde6e
| 853 |
py
|
Python
|
score_following_game/agents/optim_utils.py
|
CPJKU/score_following_game
|
f04e977b09cceb49588dcf8e216909d815a54519
|
[
"MIT"
] | 43 |
2018-07-18T14:00:04.000Z
|
2022-01-10T07:34:35.000Z
|
score_following_game/agents/optim_utils.py
|
CPJKU/score_following_game
|
f04e977b09cceb49588dcf8e216909d815a54519
|
[
"MIT"
] | 10 |
2019-05-08T07:38:26.000Z
|
2022-01-13T15:06:16.000Z
|
score_following_game/agents/optim_utils.py
|
CPJKU/score_following_game
|
f04e977b09cceb49588dcf8e216909d815a54519
|
[
"MIT"
] | 12 |
2018-10-10T09:58:49.000Z
|
2021-12-02T16:45:44.000Z
|
import numpy as np
import torch.optim as optim
from ast import literal_eval as make_tuple
def cast_optim_params(optim_params):
"""
    :param optim_params: optimizer arguments read from the config; values may be
        strings such as "0.001" or "(0.9, 0.999)"
    :return: the same dict with tuple-like strings parsed and numeric strings cast to float
"""
for k in optim_params.keys():
# check if argument is a parameter tuple
if isinstance(optim_params[k], str) and '(' in optim_params[k]:
optim_params[k] = make_tuple(optim_params[k])
else:
try:
                optim_params[k] = float(optim_params[k])
            except (TypeError, ValueError):
pass
return optim_params
def get_optimizer(optimizer_name, params, **kwargs):
"""
Compile pytorch optimizer
:param optimizer_name:
:param params:
:param kwargs:
:return:
"""
constructor = getattr(optim, optimizer_name)
optimizer = constructor(params, **kwargs)
return optimizer
| 21.325 | 71 | 0.621336 |
b3ea6c8d4ab1549c4efb2f076ba0578bbbe33d0f
| 15,640 |
py
|
Python
|
avwx/service/scrape.py
|
avwx-rest/avwx-engine
|
af0ec16630d1d79bca19f2610b5d2b84a782333f
|
[
"MIT"
] | 25 |
2019-11-27T05:33:04.000Z
|
2022-02-05T04:04:44.000Z
|
avwx/service/scrape.py
|
avwx-rest/avwx-engine
|
af0ec16630d1d79bca19f2610b5d2b84a782333f
|
[
"MIT"
] | 13 |
2019-11-18T17:03:54.000Z
|
2021-09-04T03:53:55.000Z
|
avwx/service/scrape.py
|
avwx-rest/avwx-engine
|
af0ec16630d1d79bca19f2610b5d2b84a782333f
|
[
"MIT"
] | 16 |
2019-11-18T01:55:49.000Z
|
2021-09-20T03:22:58.000Z
|
"""
Classes for retrieving raw report strings via web scraping
"""
# pylint: disable=arguments-differ,invalid-name
# stdlib
import asyncio as aio
import json
import random
from typing import Any, List, Optional, Tuple, Union
# library
from xmltodict import parse as parsexml # type: ignore
# module
from avwx.parsing.core import dedupe
from avwx.exceptions import InvalidRequest
from avwx.station import valid_station
from avwx.service.base import CallsHTTP, Service
USER_AGENTS = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15"
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
]
class ScrapeService(Service, CallsHTTP): # pylint: disable=too-few-public-methods
"""Service class for fetching reports via direct web requests"""
_valid_types: Tuple[str, ...] = ("metar", "taf")
_strip_whitespace: bool = True
def _make_err(self, body: str, key: str = "report path") -> InvalidRequest:
"""Returns an InvalidRequest exception with formatted error message"""
msg = f"Could not find {key} in {self.__class__.__name__} response\n"
return InvalidRequest(msg + body)
@staticmethod
def _make_headers() -> dict:
"""Returns request headers"""
return {}
def _post_data( # pylint: disable=no-self-use
self, station: str # pylint: disable=unused-argument
) -> dict:
"""Returns the POST form/data payload"""
return {}
def _clean_report(self, report: Any) -> Any:
"""Replaces all *whitespace elements with a single space if enabled"""
if not self._strip_whitespace:
return report
if isinstance(report, list):
return dedupe(" ".join(r.split()) for r in report)
if isinstance(report, str):
return " ".join(report.split())
return report
class StationScrape(ScrapeService):
"""Service class fetching reports from a station ICAO"""
def _make_url(self, station: str) -> Tuple[str, dict]:
"""Returns a formatted URL and parameters"""
raise NotImplementedError()
def _extract(self, raw: str, station: str) -> str:
"""Extracts the report string from the service response"""
raise NotImplementedError()
def _simple_extract(self, raw: str, starts: Union[str, List[str]], end: str) -> str:
"""Simple extract by cutting at sequential start and end points"""
if isinstance(starts, str):
targets = [starts]
else:
targets = starts
for target in targets:
index = raw.find(target)
if index == -1:
raise self._make_err("The station might not exist")
raw = raw[index:]
report = raw[: raw.find(end)].strip()
return " ".join(dedupe(report.split()))
async def _fetch(self, station: str, url: str, params: dict, timeout: int) -> str:
headers = self._make_headers()
data = self._post_data(station) if self.method.lower() == "post" else None
text = await self._call(
url, params=params, headers=headers, data=data, timeout=timeout
)
report = self._extract(text, station)
return self._clean_report(report)
def fetch(
self,
station: str,
timeout: int = 10,
) -> str:
"""Fetches a report string from the service"""
return aio.run(self.async_fetch(station, timeout))
async def async_fetch(self, station: str, timeout: int = 10) -> str:
"""Asynchronously fetch a report string from the service"""
valid_station(station)
url, params = self._make_url(station)
return await self._fetch(station, url, params, timeout)
# Multiple sources for NOAA data
class NOAA_ADDS(ScrapeService):
"""Requests data from NOAA ADDS"""
url = "https://aviationweather.gov/adds/dataserver_current/httpparam"
_valid_types = ("metar", "taf", "aircraftreport")
_rtype_map = {"airep": "aircraftreport"}
_targets = {"metar": "METAR", "taf": "TAF", "aircraftreport": "AircraftReport"}
_coallate = ("aircraftreport",)
def __init__(self, request_type: str):
super().__init__(self._rtype_map.get(request_type, request_type))
def _make_url(
self, station: Optional[str], lat: Optional[float], lon: Optional[float]
) -> Tuple[str, dict]:
"""Returns a formatted URL and parameters"""
# Base request params
params = {
"requestType": "retrieve",
"format": "XML",
"hoursBeforeNow": 2,
"dataSource": self.report_type + "s",
}
if self.report_type == "aircraftreport":
params["radialDistance"] = f"200;{lon},{lat}"
else:
params["stationString"] = station
return self.url, params
def _extract(self, raw: str) -> Union[str, List[str]]:
"""Extracts the raw_report element from XML response"""
resp = parsexml(raw)
try:
data = resp["response"]["data"]
if data["@num_results"] == "0":
return ""
reports = data[self._targets[self.report_type]]
except KeyError as key_error:
raise self._make_err(raw) from key_error
# Only one report exists
if isinstance(reports, dict):
ret = reports["raw_text"]
if self.report_type in self._coallate:
ret = [ret]
# Multiple reports exist
elif isinstance(reports, list) and reports:
if self.report_type in self._coallate:
ret = [r["raw_text"] for r in reports]
else:
ret = reports[0]["raw_text"]
# Something went wrong
else:
raise self._make_err(raw, '"raw_text"')
return ret
def fetch(
self,
station: str = None,
lat: float = None,
lon: float = None,
timeout: int = 10,
) -> Union[str, List[str]]:
"""Fetches a report string from the service"""
return aio.run(self.async_fetch(station, lat, lon, timeout))
async def async_fetch(
self,
station: str = None,
lat: float = None,
lon: float = None,
timeout: int = 10,
) -> Union[str, List[str]]:
"""Asynchronously fetch a report string from the service"""
if station:
valid_station(station)
elif lat is None or lon is None:
raise ValueError("No valid fetch parameters")
url, params = self._make_url(station, lat, lon)
text = await self._call(url, params=params, timeout=timeout)
report = self._extract(text)
return self._clean_report(report)
class NOAA_FTP(StationScrape):
"""Requests data from NOAA via FTP"""
url = "https://tgftp.nws.noaa.gov/data/{}/{}/stations/{}.TXT"
def _make_url(self, station: str) -> Tuple[str, dict]:
"""Returns a formatted URL and parameters"""
root = "forecasts" if self.report_type == "taf" else "observations"
return self.url.format(root, self.report_type, station), {}
def _extract(self, raw: str, station: str) -> str:
"""Extracts the report using string finding"""
raw = raw[raw.find(station) :]
return raw[: raw.find('"')]
class NOAA_Scrape(StationScrape):
"""Requests data from NOAA via site scraping"""
url = "https://aviationweather.gov/{}/data"
def _make_url(self, station: str) -> Tuple[str, dict]:
"""Returns a formatted URL and parameters"""
hours = 7 if self.report_type == "taf" else 2
return (
self.url.format(self.report_type),
{"ids": station, "format": "raw", "hours": hours},
)
def _extract(self, raw: str, station: str) -> str:
"""Extracts the report using string finding"""
raw = raw[raw.find("<code>") :]
raw = raw[raw.find(station) :]
raw = raw[: raw.find("</code>")]
for char in ("<br/>", " "):
raw = raw.replace(char, " ")
return raw
NOAA = NOAA_Scrape
# Regional data sources
class AMO(StationScrape):
"""Requests data from AMO KMA for Korean stations"""
url = "http://amoapi.kma.go.kr/amoApi/{}"
def _make_url(self, station: str) -> Tuple[str, dict]:
"""Returns a formatted URL and parameters"""
return self.url.format(self.report_type), {"icao": station}
def _extract(self, raw: str, station: str) -> str:
"""Extracts the report message from XML response"""
resp = parsexml(raw)
try:
report = resp["response"]["body"]["items"]["item"][
self.report_type.lower() + "Msg"
]
except KeyError as key_error:
raise self._make_err(raw) from key_error
if not report:
raise self._make_err("The station might not exist")
# Replace line breaks
report = report.replace("\n", "")
# Remove excess leading and trailing data
for item in (self.report_type.upper(), "SPECI"):
if report.startswith(item + " "):
report = report[len(item) + 1 :]
report = report.rstrip("=")
# Make every element single-spaced and stripped
return " ".join(report.split())
class MAC(StationScrape):
"""Requests data from Meteorologia Aeronautica Civil for Columbian stations"""
url = "http://meteorologia.aerocivil.gov.co/expert_text_query/parse"
method = "POST"
def _make_url(self, station: str) -> Tuple[str, dict]:
"""Returns a formatted URL and parameters"""
return self.url, {"query": f"{self.report_type} {station}"}
def _extract(self, raw: str, station: str) -> str:
"""Extracts the report message using string finding"""
return self._simple_extract(raw, station.upper() + " ", "=")
class AUBOM(StationScrape):
"""Requests data from the Australian Bureau of Meteorology"""
url = "http://www.bom.gov.au/aviation/php/process.php"
method = "POST"
def _make_url(self, _) -> Tuple[str, dict]:
"""Returns a formatted URL and empty parameters"""
return self.url, {}
@staticmethod
def _make_headers() -> dict:
"""Returns request headers"""
return {
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "*/*",
"Accept-Language": "en-us",
"Accept-Encoding": "gzip, deflate",
"Host": "www.bom.gov.au",
"Origin": "http://www.bom.gov.au",
"User-Agent": random.choice(USER_AGENTS),
"Connection": "keep-alive",
}
def _post_data(self, station: str) -> dict:
"""Returns the POST form"""
return {"keyword": station, "type": "search", "page": "TAF"}
def _extract(self, raw: str, station: str) -> str:
"""Extracts the reports from HTML response"""
index = 1 if self.report_type == "taf" else 2
try:
report = raw.split("<p")[index]
report = report[report.find(">") + 1 :]
except IndexError as index_error:
raise self._make_err("The station might not exist") from index_error
if report.startswith("<"):
return ""
report = report[: report.find("</p>")]
return report.replace("<br />", " ")
class OLBS(StationScrape):
"""Requests data from India OLBS flight briefing"""
# url = "https://olbs.amsschennai.gov.in/nsweb/FlightBriefing/showopmetquery.php"
# method = "POST"
# Temp redirect
url = "https://avbrief3.el.r.appspot.com/"
def _make_url(self, station: str) -> Tuple[str, dict]:
"""Returns a formatted URL and empty parameters"""
return self.url, {"icao": station}
def _post_data(self, station: str) -> dict:
"""Returns the POST form"""
# Can set icaos to "V*" to return all results
return {"icaos": station, "type": self.report_type}
@staticmethod
def _make_headers() -> dict:
"""Returns request headers"""
return {
# "Content-Type": "application/x-www-form-urlencoded",
# "Accept": "text/html, */*; q=0.01",
# "Accept-Language": "en-us",
"Accept-Encoding": "gzip, deflate, br",
# "Host": "olbs.amsschennai.gov.in",
"User-Agent": random.choice(USER_AGENTS),
"Connection": "keep-alive",
# "Referer": "https://olbs.amsschennai.gov.in/nsweb/FlightBriefing/",
# "X-Requested-With": "XMLHttpRequest",
"Accept-Language": "en-US,en;q=0.9",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Referer": "https://avbrief3.el.r.appspot.com/",
"Host": "avbrief3.el.r.appspot.com",
}
def _extract(self, raw: str, station: str) -> str:
"""Extracts the reports from HTML response"""
# start = raw.find(f"{self.report_type.upper()} {station} ")
return self._simple_extract(
raw, [f">{self.report_type.upper()}</div>", station], "="
)
class NAM(StationScrape):
"""Requests data from NorthAviMet for North Atlantic and Nordic countries"""
url = "https://www.northavimet.com/NamConWS/rest/opmet/command/0/"
def _make_url(self, station: str) -> Tuple[str, dict]:
"""Returns a formatted URL and empty parameters"""
return self.url + station, {}
def _extract(self, raw: str, station: str) -> str:
"""Extracts the reports from HTML response"""
starts = [f"<b>{self.report_type.upper()} <", f">{station.upper()}<", "<b> "]
report = self._simple_extract(raw, starts, "=")
return station + report[3:]
class AVT(StationScrape):
"""Requests data from AVT/XiamenAir for China
NOTE: This should be replaced later with a gov+https source
"""
url = "http://www.avt7.com/Home/AirportMetarInfo?airport4Code="
def _make_url(self, station: str) -> Tuple[str, dict]:
"""Returns a formatted URL and empty parameters"""
return self.url + station, {}
def _extract(self, raw: str, station: str) -> str:
"""Extracts the reports from HTML response"""
try:
data = json.loads(raw)
key = self.report_type.lower() + "ContentList"
return data[key]["rows"][0]["content"]
except (TypeError, json.decoder.JSONDecodeError, KeyError, IndexError):
return ""
PREFERRED = {
"RK": AMO,
# "SK": MAC,
}
BY_COUNTRY = {
"AU": AUBOM,
"CN": AVT,
"DK": NAM,
"EE": NAM,
"FI": NAM,
"FO": NAM,
"GL": NAM,
"IN": OLBS,
"IS": NAM,
"LV": NAM,
"NO": NAM,
"SE": NAM,
}
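# Illustrative lookup: get_service("RKSI", "KR") matches the "RK" prefix and returns AMO;
# stations with no preferred or per-country source fall back to NOAA.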
def get_service(station: str, country_code: str) -> ScrapeService:
"""Returns the preferred service for a given station"""
for prefix, service in PREFERRED.items():
if station.startswith(prefix):
return service # type: ignore
return BY_COUNTRY.get(country_code, NOAA) # type: ignore
| 35.30474 | 127 | 0.598146 |
9784f4fb0a748801f41e4829e067a2a2232dc788
| 1,158 |
py
|
Python
|
salt/_states/metalk8s_sysctl.py
|
SaintLoong/metalk8s
|
06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432
|
[
"Apache-2.0"
] | 255 |
2018-08-03T17:32:53.000Z
|
2022-03-25T21:51:00.000Z
|
salt/_states/metalk8s_sysctl.py
|
SaintLoong/metalk8s
|
06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432
|
[
"Apache-2.0"
] | 3,259 |
2018-08-03T00:25:56.000Z
|
2022-03-31T15:23:11.000Z
|
salt/_states/metalk8s_sysctl.py
|
SaintLoong/metalk8s
|
06fa3a731f35ab0f9ad8d3443fd8f8c4e7037432
|
[
"Apache-2.0"
] | 43 |
2018-08-08T01:47:22.000Z
|
2022-03-12T17:49:41.000Z
|
# -*- coding: utf-8 -*-
"""
Custom state to handle MetalK8s sysctl.
"""
from salt.exceptions import CommandExecutionError
__virtualname__ = "metalk8s_sysctl"
def __virtual__():
if "sysctl.present" not in __states__:
return (False, "sysctl state module could not be loaded")
return __virtualname__
def present(name, value, config=None, check_priority=True, strict=False):
"""
Wrapper around `sysctl.present` state module adding a check of
the sysctl parameter priority if `check_priority` is `True`.
If `strict` is set to `True`, check that the passed `config` is
the last file to define this `value`.
"""
if config is None:
config = __salt__["sysctl.default_config"]()
if check_priority:
try:
__salt__["metalk8s_sysctl.has_precedence"](name, value, config, strict)
except CommandExecutionError as exc:
return {
"name": name,
"result": False,
"changes": {},
"comment": "Unable to set sysctl value: {0}".format(exc),
}
return __states__["sysctl.present"](name, value, config)
| 29.692308 | 83 | 0.629534 |
1fcae82d2d853c8a3b17a3d00de7e0951a9718ff
| 92 |
py
|
Python
|
TexDBook/src/python/core/static_gzip.py
|
kkysen/TexDBook
|
61d9db5f00f04b00fd45ef50d0c8df417548d324
|
[
"MIT"
] | null | null | null |
TexDBook/src/python/core/static_gzip.py
|
kkysen/TexDBook
|
61d9db5f00f04b00fd45ef50d0c8df417548d324
|
[
"MIT"
] | 9 |
2018-05-18T16:19:27.000Z
|
2022-02-26T03:48:31.000Z
|
TexDBook/src/python/core/static_gzip.py
|
kkysen/TexDBook
|
61d9db5f00f04b00fd45ef50d0c8df417548d324
|
[
"MIT"
] | 1 |
2018-06-14T04:06:14.000Z
|
2018-06-14T04:06:14.000Z
|
from TexDBook.src.python.core.init_app import default_init_app
init_app = default_init_app
| 23 | 62 | 0.858696 |
5435df3090ba3f164d1685ab1d81a1d72ec1b0a5
| 10,065 |
py
|
Python
|
client-py/SessionTest.py
|
ljn55966005/iotdb
|
2788f4aa5450c207f6982d41d2377fcbf5b6f147
|
[
"Apache-2.0"
] | null | null | null |
client-py/SessionTest.py
|
ljn55966005/iotdb
|
2788f4aa5450c207f6982d41d2377fcbf5b6f147
|
[
"Apache-2.0"
] | null | null | null |
client-py/SessionTest.py
|
ljn55966005/iotdb
|
2788f4aa5450c207f6982d41d2377fcbf5b6f147
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Uncomment the following line to use apache-iotdb module installed by pip3
import numpy as np
from iotdb.Session import Session
from iotdb.utils.IoTDBConstants import TSDataType, TSEncoding, Compressor
from iotdb.utils.NumpyTablet import NumpyTablet
from iotdb.utils.Tablet import Tablet
# whether the test has passed
final_flag = True
failed_count = 0
def test_fail():
global failed_count
global final_flag
final_flag = False
failed_count += 1
def print_message(message):
print("*********")
print(message)
print("*********")
# creating session connection.
ip = "127.0.0.1"
port_ = "6667"
username_ = "root"
password_ = "root"
session = Session(ip, port_, username_, password_, fetch_size=1024, zone_id="UTC+8")
session.open(False)
if not session.is_open():
print("can't open session")
exit(1)
# set and delete storage groups
session.set_storage_group("root.sg_test_01")
session.set_storage_group("root.sg_test_02")
session.set_storage_group("root.sg_test_03")
session.set_storage_group("root.sg_test_04")
if session.delete_storage_group("root.sg_test_02") < 0:
test_fail()
print_message("delete storage group failed")
if session.delete_storage_groups(["root.sg_test_03", "root.sg_test_04"]) < 0:
test_fail()
print_message("delete storage groups failed")
# setting time series.
session.create_time_series(
"root.sg_test_01.d_01.s_01", TSDataType.BOOLEAN, TSEncoding.PLAIN, Compressor.SNAPPY
)
session.create_time_series(
"root.sg_test_01.d_01.s_02", TSDataType.INT32, TSEncoding.PLAIN, Compressor.SNAPPY
)
session.create_time_series(
"root.sg_test_01.d_01.s_03", TSDataType.INT64, TSEncoding.PLAIN, Compressor.SNAPPY
)
session.create_time_series(
"root.sg_test_01.d_02.s_01",
TSDataType.BOOLEAN,
TSEncoding.PLAIN,
Compressor.SNAPPY,
None,
{"tag1": "v1"},
{"description": "v1"},
"temperature"
)
# setting multiple time series once.
ts_path_lst_ = [
"root.sg_test_01.d_01.s_04",
"root.sg_test_01.d_01.s_05",
"root.sg_test_01.d_01.s_06",
"root.sg_test_01.d_01.s_07",
"root.sg_test_01.d_01.s_08",
"root.sg_test_01.d_01.s_09",
]
data_type_lst_ = [
TSDataType.FLOAT,
TSDataType.DOUBLE,
TSDataType.TEXT,
TSDataType.FLOAT,
TSDataType.DOUBLE,
TSDataType.TEXT,
]
encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
session.create_multi_time_series(
ts_path_lst_, data_type_lst_, encoding_lst_, compressor_lst_
)
ts_path_lst_ = [
"root.sg_test_01.d_02.s_04",
"root.sg_test_01.d_02.s_05",
"root.sg_test_01.d_02.s_06",
"root.sg_test_01.d_02.s_07",
"root.sg_test_01.d_02.s_08",
"root.sg_test_01.d_02.s_09",
]
data_type_lst_ = [
TSDataType.FLOAT,
TSDataType.DOUBLE,
TSDataType.TEXT,
TSDataType.FLOAT,
TSDataType.DOUBLE,
TSDataType.TEXT,
]
encoding_lst_ = [TSEncoding.PLAIN for _ in range(len(data_type_lst_))]
compressor_lst_ = [Compressor.SNAPPY for _ in range(len(data_type_lst_))]
tags_lst_ = [{"tag2": "v2"} for _ in range(len(data_type_lst_))]
attributes_lst_ = [{"description": "v2"} for _ in range(len(data_type_lst_))]
session.create_multi_time_series(
ts_path_lst_, data_type_lst_, encoding_lst_, compressor_lst_, None, tags_lst_, attributes_lst_, None
)
# delete time series
if (
session.delete_time_series(
[
"root.sg_test_01.d_01.s_07",
"root.sg_test_01.d_01.s_08",
"root.sg_test_01.d_01.s_09",
]
)
< 0
):
test_fail()
print_message("delete time series failed")
# checking time series
# s_07 expecting False
if session.check_time_series_exists("root.sg_test_01.d_01.s_07"):
test_fail()
print_message("root.sg_test_01.d_01.s_07 shouldn't exist")
# s_03 expecting True
if not session.check_time_series_exists("root.sg_test_01.d_01.s_03"):
test_fail()
print_message("root.sg_test_01.d_01.s_03 should exist")
# d_02.s_01 expecting True
if not session.check_time_series_exists("root.sg_test_01.d_02.s_01"):
test_fail()
print_message("root.sg_test_01.d_02.s_01 should exist")
# d_02.s_06 expecting True
if not session.check_time_series_exists("root.sg_test_01.d_02.s_06"):
test_fail()
print_message("root.sg_test_01.d_02.s_06 should exist")
# insert one record into the database.
measurements_ = ["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"]
values_ = [False, 10, 11, 1.1, 10011.1, "test_record"]
data_types_ = [
TSDataType.BOOLEAN,
TSDataType.INT32,
TSDataType.INT64,
TSDataType.FLOAT,
TSDataType.DOUBLE,
TSDataType.TEXT,
]
if (
session.insert_record(
"root.sg_test_01.d_01", 1, measurements_, data_types_, values_
)
< 0
):
test_fail()
print_message("insert record failed")
# insert multiple records into database
measurements_list_ = [
["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
["s_01", "s_02", "s_03", "s_04", "s_05", "s_06"],
]
values_list_ = [
[False, 22, 33, 4.4, 55.1, "test_records01"],
[True, 77, 88, 1.25, 8.125, "test_records02"],
]
data_type_list_ = [data_types_, data_types_]
device_ids_ = ["root.sg_test_01.d_01", "root.sg_test_01.d_01"]
if (
session.insert_records(
device_ids_, [2, 3], measurements_list_, data_type_list_, values_list_
)
< 0
):
test_fail()
print_message("insert records failed")
# insert one tablet into the database.
values_ = [
[False, 10, 11, 1.1, 10011.1, "test01"],
[True, 100, 11111, 1.25, 101.0, "test02"],
[False, 100, 1, 188.1, 688.25, "test03"],
[True, 0, 0, 0, 6.25, "test04"],
] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.
timestamps_ = [4, 5, 6, 7]
tablet_ = Tablet(
"root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
)
if session.insert_tablet(tablet_) < 0:
test_fail()
print_message("insert tablet failed")
# insert one numpy tablet into the database.
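# The '>' (big-endian) dtypes below are assumed to match the byte order IoTDB
# expects when serializing numpy tablet payloads.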
np_values_ = [
np.array([False, True, False, True], np.dtype('>?')),
np.array([10, 100, 100, 0], np.dtype('>i4')),
np.array([11, 11111, 1, 0], np.dtype('>i8')),
np.array([1.1, 1.25, 188.1, 0], np.dtype('>f4')),
np.array([10011.1, 101.0, 688.25, 6.25], np.dtype('>f8')),
["test01", "test02", "test03", "test04"],
]
np_timestamps_ = np.array([1, 2, 3, 4], np.dtype('>i8'))
np_tablet_ = NumpyTablet(
"root.sg_test_01.d_02", measurements_, data_types_, np_values_, np_timestamps_
)
if session.insert_tablet(np_tablet_) < 0:
test_fail()
print_message("insert numpy tablet failed")
# insert multiple tablets into database
tablet_01 = Tablet(
"root.sg_test_01.d_01", measurements_, data_types_, values_, [8, 9, 10, 11]
)
tablet_02 = Tablet(
"root.sg_test_01.d_01", measurements_, data_types_, values_, [12, 13, 14, 15]
)
if session.insert_tablets([tablet_01, tablet_02]) < 0:
test_fail()
print_message("insert tablets failed")
# insert one tablet with empty cells into the database.
values_ = [
[None, 10, 11, 1.1, 10011.1, "test01"],
[True, None, 11111, 1.25, 101.0, "test02"],
[False, 100, 1, None, 688.25, "test03"],
[True, 0, 0, 0, None, None],
] # Non-ASCII text will cause error since bytes can only hold 0-128 nums.
timestamps_ = [20, 21, 22, 23]
tablet_ = Tablet(
"root.sg_test_01.d_01", measurements_, data_types_, values_, timestamps_
)
if session.insert_tablet(tablet_) < 0:
test_fail()
print_message("insert tablet with empty cells failed")
# insert records of one device
time_list = [1, 2, 3]
measurements_list = [
["s_01", "s_02", "s_03"],
["s_01", "s_02", "s_03"],
["s_01", "s_02", "s_03"],
]
data_types_list = [
[TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
[TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
[TSDataType.BOOLEAN, TSDataType.INT32, TSDataType.INT64],
]
values_list = [[False, 22, 33], [True, 1, 23], [False, 15, 26]]
if (
session.insert_records_of_one_device(
"root.sg_test_01.d_01",
time_list,
measurements_list,
data_types_list,
values_list,
)
< 0
):
test_fail()
print_message("insert records of one device failed")
# execute non-query sql statement
if (
session.execute_non_query_statement(
"insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)"
)
< 0
):
test_fail()
print_message(
"execute 'insert into root.sg_test_01.d_01(timestamp, s_02) values(16, 188)' failed"
)
# execute sql query statement
session_data_set = session.execute_query_statement("select * from root.sg_test_01.d_01")
session_data_set.set_fetch_size(1024)
expect_count = 20
actual_count = 0
while session_data_set.has_next():
print(session_data_set.next())
actual_count += 1
session_data_set.close_operation_handle()
if actual_count != expect_count:
test_fail()
print_message(
"query count mismatch: expect count: "
+ str(expect_count)
+ " actual count: "
+ str(actual_count)
)
# close session connection.
session.close()
if final_flag:
print("All executions done!!")
else:
print("Some test failed, please have a check")
print("failed count: ", failed_count)
exit(1)
| 29.866469 | 104 | 0.695678 |
f95f89599bfd5570ee14d5db093a8966caff7673
| 10,874 |
py
|
Python
|
libcloud/compute/drivers/dummy.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | null | null | null |
libcloud/compute/drivers/dummy.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | 1 |
2021-12-06T12:29:13.000Z
|
2021-12-06T12:29:13.000Z
|
libcloud/compute/drivers/dummy.py
|
rgharris/libcloud
|
90971e17bfd7b6bb97b2489986472c531cc8e140
|
[
"Apache-2.0"
] | 1 |
2019-08-05T10:12:02.000Z
|
2019-08-05T10:12:02.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dummy Driver
@note: This driver is out of date
"""
import uuid
import socket
import struct
from libcloud.common.base import ConnectionKey
from libcloud.compute.base import NodeImage, NodeSize, Node
from libcloud.compute.base import NodeDriver, NodeLocation
from libcloud.compute.base import KeyPair
from libcloud.compute.types import Provider, NodeState
class DummyConnection(ConnectionKey):
"""
Dummy connection class
"""
def connect(self, host=None, port=None):
pass
class DummyNodeDriver(NodeDriver):
"""
Dummy node driver
This is a fake driver which appears to always create or destroy
nodes successfully.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node=driver.create_node()
>>> node.public_ips[0]
'127.0.0.3'
>>> node.name
'dummy-3'
If the credentials you give convert to an integer then the next
node to be created will be one higher.
Each time you create a node you will get a different IP address.
>>> driver = DummyNodeDriver(22)
>>> node=driver.create_node()
>>> node.name
'dummy-23'
"""
name = "Dummy Node Provider"
website = "http://example.com"
type = Provider.DUMMY
def __init__(self, creds):
"""
:param creds: Credentials
:type creds: ``str``
:rtype: ``None``
"""
self.creds = creds
try:
num = int(creds)
except ValueError:
num = None
if num:
self.nl = []
startip = _ip_to_int("127.0.0.1")
for i in range(num):
ip = _int_to_ip(startip + i)
self.nl.append(
Node(
id=i,
name="dummy-%d" % (i),
state=NodeState.RUNNING,
public_ips=[ip],
private_ips=[],
driver=self,
extra={"foo": "bar"},
)
)
else:
self.nl = [
Node(
id=1,
name="dummy-1",
state=NodeState.RUNNING,
public_ips=["127.0.0.1"],
private_ips=[],
driver=self,
extra={"foo": "bar"},
),
Node(
id=2,
name="dummy-2",
state=NodeState.RUNNING,
public_ips=["127.0.0.1"],
private_ips=[],
driver=self,
extra={"foo": "bar"},
),
]
self.connection = DummyConnection(self.creds)
def get_uuid(self, unique_field=None):
"""
:param unique_field: Unique field
:type unique_field: ``bool``
:rtype: :class:`UUID`
"""
return str(uuid.uuid4())
def list_nodes(self):
"""
List the nodes known to a particular driver;
There are two default nodes created at the beginning
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node_list=driver.list_nodes()
>>> sorted([node.name for node in node_list ])
['dummy-1', 'dummy-2']
each item in the list returned is a node object from which you
can carry out any node actions you wish
>>> node_list[0].reboot()
True
As more nodes are added, list_nodes will return them
>>> node=driver.create_node()
>>> node.size.id
's1'
>>> node.image.id
'i2'
>>> sorted([n.name for n in driver.list_nodes()])
['dummy-1', 'dummy-2', 'dummy-3']
@inherits: :class:`NodeDriver.list_nodes`
"""
return self.nl
def reboot_node(self, node):
"""
Sets the node state to rebooting; in this dummy driver always
returns True as if the reboot had been successful.
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> node=driver.create_node()
>>> from libcloud.compute.types import NodeState
>>> node.state == NodeState.RUNNING
True
>>> node.state == NodeState.REBOOTING
False
>>> driver.reboot_node(node)
True
>>> node.state == NodeState.REBOOTING
True
Please note, dummy nodes never recover from the reboot.
@inherits: :class:`NodeDriver.reboot_node`
"""
node.state = NodeState.REBOOTING
return True
def destroy_node(self, node):
"""
Sets the node state to terminated and removes it from the node list
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> from libcloud.compute.types import NodeState
>>> node = [node for node in driver.list_nodes() if
... node.name == 'dummy-1'][0]
>>> node.state == NodeState.RUNNING
True
>>> driver.destroy_node(node)
True
>>> node.state == NodeState.RUNNING
False
>>> [n for n in driver.list_nodes() if n.name == 'dummy-1']
[]
@inherits: :class:`NodeDriver.destroy_node`
"""
node.state = NodeState.TERMINATED
self.nl.remove(node)
return True
def list_images(self, location=None):
"""
Returns a list of images as a cloud provider might have
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> sorted([image.name for image in driver.list_images()])
['Slackware 4', 'Ubuntu 9.04', 'Ubuntu 9.10']
@inherits: :class:`NodeDriver.list_images`
"""
return [
NodeImage(id=1, name="Ubuntu 9.10", driver=self),
NodeImage(id=2, name="Ubuntu 9.04", driver=self),
NodeImage(id=3, name="Slackware 4", driver=self),
]
def list_sizes(self, location=None):
"""
Returns a list of node sizes as a cloud provider might have
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> sorted([size.ram for size in driver.list_sizes()])
[128, 512, 4096, 8192]
@inherits: :class:`NodeDriver.list_images`
"""
return [
NodeSize(
id=1, name="Small", ram=128, disk=4, bandwidth=500, price=4, driver=self
),
NodeSize(
id=2,
name="Medium",
ram=512,
disk=16,
bandwidth=1500,
price=8,
driver=self,
),
NodeSize(
id=3,
name="Big",
ram=4096,
disk=32,
bandwidth=2500,
price=32,
driver=self,
),
NodeSize(
id=4,
name="XXL Big",
ram=4096 * 2,
disk=32 * 4,
bandwidth=2500 * 3,
price=32 * 2,
driver=self,
),
]
def list_locations(self):
"""
Returns a list of locations of nodes
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> sorted([loc.name + " in " + loc.country for loc in
... driver.list_locations()])
['Island Datacenter in FJ', 'London Loft in GB', "Paul's Room in US"]
@inherits: :class:`NodeDriver.list_locations`
"""
return [
NodeLocation(id=1, name="Paul's Room", country="US", driver=self),
NodeLocation(id=2, name="London Loft", country="GB", driver=self),
NodeLocation(id=3, name="Island Datacenter", country="FJ", driver=self),
]
def create_node(self, name, size, image):
"""
Creates a dummy node; the node id is equal to the number of
nodes in the node list
>>> from libcloud.compute.drivers.dummy import DummyNodeDriver
>>> driver = DummyNodeDriver(0)
>>> sorted([node.name for node in driver.list_nodes()])
['dummy-1', 'dummy-2']
>>> nodeA = driver.create_node()
>>> sorted([node.name for node in driver.list_nodes()])
['dummy-1', 'dummy-2', 'dummy-3']
>>> driver.create_node().name
'dummy-4'
>>> driver.destroy_node(nodeA)
True
>>> sorted([node.name for node in driver.list_nodes()])
['dummy-1', 'dummy-2', 'dummy-4']
@inherits: :class:`NodeDriver.create_node`
"""
num = len(self.nl) + 1
n = Node(
id=num,
name="dummy-%d" % (num),
state=NodeState.RUNNING,
public_ips=["127.0.0.%d" % (num)],
private_ips=[],
driver=self,
size=NodeSize(
id="s1",
name="foo",
ram=2048,
disk=160,
bandwidth=None,
price=0.0,
driver=self,
),
image=NodeImage(id="i2", name="image", driver=self),
extra={"foo": "bar"},
)
self.nl.append(n)
return n
def import_key_pair_from_string(self, name, key_material):
key_pair = KeyPair(
name=name,
public_key=key_material,
fingerprint="fingerprint",
private_key="private_key",
driver=self,
)
return key_pair
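# Module helpers used to hand out sequential node IPs, e.g.
# _int_to_ip(_ip_to_int("127.0.0.1") + 2) == "127.0.0.3"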
def _ip_to_int(ip):
return socket.htonl(struct.unpack("I", socket.inet_aton(ip))[0])
def _int_to_ip(ip):
return socket.inet_ntoa(struct.pack("I", socket.ntohl(ip)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30.205556 | 88 | 0.536233 |
abadae2800484f587f23c8acbfd7ec0cc0f42800
| 15,389 |
py
|
Python
|
pepper/libpepper.py
|
bloomberg/pepper
|
c0cbfa6385c0bd6b36f4a352eb482bce87d817c5
|
[
"Apache-2.0"
] | 1 |
2021-11-09T05:48:51.000Z
|
2021-11-09T05:48:51.000Z
|
pepper/libpepper.py
|
bloomberg/pepper
|
c0cbfa6385c0bd6b36f4a352eb482bce87d817c5
|
[
"Apache-2.0"
] | null | null | null |
pepper/libpepper.py
|
bloomberg/pepper
|
c0cbfa6385c0bd6b36f4a352eb482bce87d817c5
|
[
"Apache-2.0"
] | 2 |
2018-12-26T18:30:13.000Z
|
2021-11-09T05:48:29.000Z
|
'''
A Python library for working with Salt's REST API
(Specifically the rest_cherrypy netapi module.)
'''
import json
import logging
import re
import ssl
from pepper.exceptions import PepperException
try:
ssl._create_default_https_context = ssl._create_stdlib_context
except Exception:
pass
try:
from urllib.request import HTTPHandler, HTTPSHandler, Request, urlopen, \
install_opener, build_opener
from urllib.error import HTTPError, URLError
import urllib.parse as urlparse
except ImportError:
from urllib2 import HTTPHandler, HTTPSHandler, Request, urlopen, install_opener, build_opener, \
HTTPError, URLError
import urlparse
logger = logging.getLogger(__name__)
class Pepper(object):
'''
A thin wrapper for making HTTP calls to the salt-api rest_cherrpy REST
interface
>>> api = Pepper('https://localhost:8000')
>>> api.login('saltdev', 'saltdev', 'pam')
{"return": [
{
"eauth": "pam",
"expire": 1370434219.714091,
"perms": [
"test.*"
],
"start": 1370391019.71409,
"token": "c02a6f4397b5496ba06b70ae5fd1f2ab75de9237",
"user": "saltdev"
}
]
}
>>> api.low([{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}])
{u'return': [{u'ms-0': True,
u'ms-1': True,
u'ms-2': True,
u'ms-3': True,
u'ms-4': True}]}
'''
def __init__(self, api_url='https://localhost:8000', debug_http=False, ignore_ssl_errors=False, timeout=None):
'''
Initialize the class with the URL of the API
:param api_url: Host or IP address of the salt-api URL;
include the port number
:param debug_http: Add a flag to urllib2 to output the HTTP exchange
:param ignore_ssl_errors: Add a flag to urllib2 to ignore invalid SSL certificates
:raises PepperException: if the api_url is misformed
'''
split = urlparse.urlsplit(api_url)
if split.scheme not in ['http', 'https']:
raise PepperException("salt-api URL missing HTTP(s) protocol: {0}"
.format(api_url))
self.api_url = api_url
self.debug_http = int(debug_http)
self._ssl_verify = not ignore_ssl_errors
self.auth = {}
self.salt_version = None
self.timeout = timeout
def req_stream(self, path):
'''
A thin wrapper to get a response from saltstack api.
The body of the response will not be downloaded immediately.
Make sure to close the connection after use.
api = Pepper('http://ipaddress/api/')
print(api.login('salt','salt','pam'))
response = api.req_stream('/events')
:param path: The path to the salt api resource
:return: :class:`Response <Response>` object
:rtype: requests.Response
'''
import requests
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
}
if self.auth and 'token' in self.auth and self.auth['token']:
headers.setdefault('X-Auth-Token', self.auth['token'])
else:
raise PepperException('Authentication required')
return
params = {'url': self._construct_url(path),
'headers': headers,
'verify': self._ssl_verify is True,
'stream': True
}
try:
resp = requests.get(**params)
if resp.status_code == 401:
raise PepperException(str(resp.status_code) + ':Authentication denied')
return
if resp.status_code == 500:
raise PepperException(str(resp.status_code) + ':Server error.')
return
if resp.status_code == 404:
raise PepperException(str(resp.status_code) + ' :This request returns nothing.')
return
except PepperException as e:
print(e)
return
return resp
def req_get(self, path):
'''
A thin wrapper from get http method of saltstack api
api = Pepper('http://ipaddress/api/')
print(api.login('salt','salt','pam'))
print(api.req_get('/keys'))
'''
import requests
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
}
if self.auth and 'token' in self.auth and self.auth['token']:
headers.setdefault('X-Auth-Token', self.auth['token'])
else:
raise PepperException('Authentication required')
return
params = {'url': self._construct_url(path),
'headers': headers,
'verify': self._ssl_verify is True,
}
try:
resp = requests.get(**params)
if resp.status_code == 401:
raise PepperException(str(resp.status_code) + ':Authentication denied')
return
if resp.status_code == 500:
raise PepperException(str(resp.status_code) + ':Server error.')
return
if resp.status_code == 404:
raise PepperException(str(resp.status_code) + ' :This request returns nothing.')
return
except PepperException as e:
print(e)
return
return resp.json()
def req(self, path, data=None):
'''
A thin wrapper around urllib2 to send requests and return the response
If the current instance contains an authentication token it will be
attached to the request as a custom header.
:rtype: dictionary
'''
if ((hasattr(data, 'get') and data.get('eauth') == 'kerberos')
or self.auth.get('eauth') == 'kerberos'):
return self.req_requests(path, data)
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
}
opener = build_opener()
for handler in opener.handlers:
if isinstance(handler, HTTPHandler):
handler.set_http_debuglevel(self.debug_http)
if isinstance(handler, HTTPSHandler):
handler.set_http_debuglevel(self.debug_http)
install_opener(opener)
# Build POST data
if data is not None:
postdata = json.dumps(data).encode()
clen = len(postdata)
else:
postdata = None
# Create request object
url = self._construct_url(path)
req = Request(url, postdata, headers)
# Add POST data to request
if data is not None:
req.add_header('Content-Length', clen)
# Add auth header to request
if path != '/run' and self.auth and 'token' in self.auth and self.auth['token']:
req.add_header('X-Auth-Token', self.auth['token'])
# Send request
try:
con_kwargs = {}
if not (self._ssl_verify):
con = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
con_kwargs['context'] = con
#if self.timeout:
# con_kwargs['timeout'] = self.timeout + 5 # throw a bit of buffer for upstream lag
f = urlopen(req, **con_kwargs)
content = f.read().decode('utf-8')
if (self.debug_http):
logger.debug('Response: %s', content)
ret = json.loads(content)
if not self.salt_version and 'x-salt-version' in f.headers:
self._parse_salt_version(f.headers['x-salt-version'])
except (HTTPError, URLError) as exc:
logger.debug('Error with request', exc_info=True)
status = getattr(exc, 'code', None)
if status == 401:
raise PepperException('Authentication denied')
if status == 500:
raise PepperException('Server error.')
logger.error('Error with request: {0}'.format(exc))
raise
except AttributeError:
logger.debug('Error converting response from JSON', exc_info=True)
raise PepperException('Unable to parse the server response.')
return ret
def req_requests(self, path, data=None):
'''
A thin wrapper around request and request_kerberos to send
requests and return the response
If the current instance contains an authentication token it will be
attached to the request as a custom header.
:rtype: dictionary
'''
import requests
from requests_kerberos import HTTPKerberosAuth, OPTIONAL
auth = HTTPKerberosAuth(mutual_authentication=OPTIONAL)
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Requested-With': 'XMLHttpRequest',
}
if self.auth and 'token' in self.auth and self.auth['token']:
headers.setdefault('X-Auth-Token', self.auth['token'])
# Optionally toggle SSL verification
params = {'url': self._construct_url(path),
'headers': headers,
'verify': self._ssl_verify is True,
'auth': auth,
'data': json.dumps(data),
}
#if self.timeout:
# params['timeout'] = self.timeout + 5
logger.debug('postdata {0}'.format(params))
resp = requests.post(**params)
if resp.status_code == 401:
# TODO should be resp.raise_from_status
raise PepperException('Authentication denied')
if resp.status_code == 500:
# TODO should be resp.raise_from_status
raise PepperException('Server error.')
if not self.salt_version and 'x-salt-version' in resp.headers:
self._parse_salt_version(resp.headers['x-salt-version'])
return resp.json()
def low(self, lowstate, path='/'):
'''
Execute a command through salt-api and return the response
:param string path: URL path to be joined with the API hostname
:param list lowstate: a list of lowstate dictionaries
'''
return self.req(path, lowstate)
def local(self, tgt, fun, arg=None, kwarg=None, expr_form='glob',
timeout=None, ret=None):
'''
Run a single command using the ``local`` client
Wraps :meth:`low`.
'''
low = {
'client': 'local',
'tgt': tgt,
'fun': fun,
}
if arg:
low['arg'] = arg
if kwarg:
low['kwarg'] = kwarg
if expr_form:
low['expr_form'] = expr_form
if timeout:
low['timeout'] = timeout
if ret:
low['ret'] = ret
return self.low([low])
def local_async(self, tgt, fun, arg=None, kwarg=None, expr_form='glob',
timeout=None, ret=None):
'''
Run a single command using the ``local_async`` client
Wraps :meth:`low`.
'''
low = {
'client': 'local_async',
'tgt': tgt,
'fun': fun,
}
if arg:
low['arg'] = arg
if kwarg:
low['kwarg'] = kwarg
if expr_form:
low['expr_form'] = expr_form
if timeout:
low['timeout'] = timeout
if ret:
low['ret'] = ret
return self.low([low])
def local_batch(self, tgt, fun, arg=None, kwarg=None, expr_form='glob',
batch='50%', ret=None):
'''
Run a single command using the ``local_batch`` client
Wraps :meth:`low`.
'''
low = {
'client': 'local_batch',
'tgt': tgt,
'fun': fun,
}
if arg:
low['arg'] = arg
if kwarg:
low['kwarg'] = kwarg
if expr_form:
low['expr_form'] = expr_form
if batch:
low['batch'] = batch
if ret:
low['ret'] = ret
return self.low([low])
def lookup_jid(self, jid):
'''
Get job results
Wraps :meth:`runner`.
'''
return self.runner('jobs.lookup_jid', jid='{0}'.format(jid))
def runner(self, fun, arg=None, **kwargs):
'''
Run a single command using the ``runner`` client
Usage::
runner('jobs.lookup_jid', jid=12345)
'''
low = {
'client': 'runner',
'fun': fun,
}
if arg:
low['arg'] = arg
low.update(kwargs)
return self.low([low])
def wheel(self, fun, arg=None, kwarg=None, **kwargs):
'''
Run a single command using the ``wheel`` client
Usage::
wheel('key.accept', match='myminion')
'''
low = {
'client': 'wheel',
'fun': fun,
}
if arg:
low['arg'] = arg
if kwarg:
low['kwarg'] = kwarg
low.update(kwargs)
return self.low([low])
def _send_auth(self, path, **kwargs):
return self.req(path, kwargs)
def login(self, username=None, password=None, eauth=None, **kwargs):
'''
Authenticate with salt-api and return the user permissions and
authentication token or an empty dict
'''
local = locals()
kwargs.update(
dict(
(key, local[key]) for key in (
'username',
'password',
'eauth'
) if local.get(key, None) is not None
)
)
self.auth = self._send_auth('/login', **kwargs).get('return', [{}])[0]
return self.auth
def token(self, **kwargs):
'''
Get an eauth token from Salt for use with the /run URL
'''
self.auth = self._send_auth('/token', **kwargs)[0]
return self.auth
def _construct_url(self, path):
'''
Construct the url to salt-api for the given path
Args:
path: the path to the salt-api resource
>>> api = Pepper('https://localhost:8000/salt-api/')
>>> api._construct_url('/login')
'https://localhost:8000/salt-api/login'
'''
relative_path = path.lstrip('/')
return urlparse.urljoin(self.api_url, relative_path)
def _parse_salt_version(self, version):
# borrow from salt.version
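        # e.g. "2019.2.3" -> ('2019', '2', '3', None, None, None, None, None)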
git_describe_regex = re.compile(
r'(?:[^\d]+)?(?P<major>[\d]{1,4})'
r'\.(?P<minor>[\d]{1,2})'
r'(?:\.(?P<bugfix>[\d]{0,2}))?'
r'(?:\.(?P<mbugfix>[\d]{0,2}))?'
r'(?:(?P<pre_type>rc|a|b|alpha|beta|nb)(?P<pre_num>[\d]{1}))?'
r'(?:(?:.*)-(?P<noc>(?:[\d]+|n/a))-(?P<sha>[a-z0-9]{8}))?'
)
match = git_describe_regex.match(version)
if match:
self.salt_version = match.groups()
| 30.11546 | 114 | 0.533303 |
e956da3f927a9b071dd35764c95c0b81bf8dec0d
| 4,039 |
py
|
Python
|
project/experiments/exp_017_generate_random_bodies/src/common/utils.py
|
liusida/thesis-bodies
|
dceb8a36efd2cefc611f6749a52b56b9d3572f7a
|
[
"MIT"
] | null | null | null |
project/experiments/exp_017_generate_random_bodies/src/common/utils.py
|
liusida/thesis-bodies
|
dceb8a36efd2cefc611f6749a52b56b9d3572f7a
|
[
"MIT"
] | null | null | null |
project/experiments/exp_017_generate_random_bodies/src/common/utils.py
|
liusida/thesis-bodies
|
dceb8a36efd2cefc611f6749a52b56b9d3572f7a
|
[
"MIT"
] | null | null | null |
import os
import pathlib
import yaml
import torch.nn as nn
import numpy as np
def get_exp_name():
"""Return current experiment folder, such as exp0."""
_full_path = str(pathlib.Path().absolute())
_paths = _full_path.split('/')
assert _paths[-1] == "src", f"Project structure has been changed. utils.get_exp_folder() should be changed accordingly.\n{_paths}"
assert len(_paths) > 2, "Why path is so short?"
_folder_name = _paths[-2]
return _folder_name
def get_output_data_folder(init=False):
# Create output folder is not exist yet.
_path = pathlib.Path("../../../output_data")
if not _path.is_dir():
print("Starting a new project? Congratulations! \n\nCreating output data path for the first time.")
print(f"mkdir {_path.resolve()}")
_path.mkdir()
output_data_folder = _path / get_exp_name()
output_data_folder.mkdir(exist_ok=True)
_subs = ["tensorboard", "plots", "models", "saved_images", "videos", "checkpoints", "tmp", "bodies"]
for _sub in _subs:
(output_data_folder / _sub).mkdir(exist_ok=True)
if init:
# Create a symlink to output_data
_sym_link = pathlib.Path("output_data")
if _sym_link.is_symlink():
_sym_link.unlink()
_sym_link.symlink_to(output_data_folder, target_is_directory=True)
return output_data_folder
def get_input_data_folder():
_path = pathlib.Path("../input_data")
assert _path.exists()
return _path
def get_current_folder():
return pathlib.Path()
def check_exp_folder():
"""Make sure .exp_folder contains the right folder name"""
_exp_folder = pathlib.Path(".exp_folder")
_folder = get_exp_name()
if _exp_folder.exists():
_str = _exp_folder.read_text()
if _folder == _str:
return
_exp_folder.write_text(_folder)
return
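# Example (assuming default wrapper settings): train_bodies_str="300,301",
# vec_normalize=True, seed=0 -> "model-300-301-vnorm-sd0".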
def build_model_filename(args):
filename = "model-"
filename += args.train_bodies_str.replace(",", "-")
if args.with_bodyinfo:
filename += "-body"
if args.vec_normalize:
filename += "-vnorm"
if args.stack_frames>1:
filename += f"-stack{args.stack_frames}"
if args.threshold_threshold!=0:
filename += f"-thr{args.threshold_threshold}"
if len(args.initialize_weights_from) > 0:
filename += f"-initw"
if args.topology_wrapper=="same":
if args.realign_method!="":
filename += f"-realign{args.realign_method}"
elif args.topology_wrapper=="diff":
if args.wrapper_case!="":
filename += f"-case{args.wrapper_case}"
elif args.topology_wrapper=="MutantWrapper":
filename += "-MutantWrapper"
else:
pass
# if args.misalign_obs:
# filename += f"-mis"
# if args.random_align_obs:
# filename += f"-ra"
# if args.preserve_header:
# filename += f"-ph"
# if args.random_even_same_body:
# filename += f"-resb"
# if args.preserve_feet_contact:
# filename += f"-pfc"
filename += f"-sd{args.seed}"
return filename
def mean_and_error(_data):
"""A helper for creating error bar"""
_data = np.array(_data)
_two_sigma = 2*np.std(_data)
_mean = np.mean(_data)
print(f"{_mean:.0f} +- {_two_sigma:.0f}")
return _mean, _two_sigma
def linux_fullscreen():
"""A helper for entering Full Screen mode in Linux.
Faking a mouse click and a key press (Ctrl+F11).
"""
from pymouse import PyMouse
from pykeyboard import PyKeyboard
m = PyMouse()
k = PyKeyboard()
x_dim, y_dim = m.screen_size()
m.click(int(x_dim/3), int(y_dim/2), 1)
k.press_key(k.control_key)
k.tap_key(k.function_keys[11])
k.release_key(k.control_key)
def load_hyperparameters(conf_name="MyWalkerEnv"):
with (get_input_data_folder() / "hyperparameters.yml").open() as f:
hp = yaml.load(f, Loader=yaml.SafeLoader)
hyperparams = hp[conf_name]
hyperparams["policy_kwargs"] = eval(hyperparams["policy_kwargs"])
return hyperparams
| 31.069231 | 134 | 0.649913 |
1d54d3159ceac8afcea6f64b58228dd0371c5710
| 4,282 |
py
|
Python
|
datagen.py
|
r-or/cnn-eyetrack
|
93a09f209aa8d34defc82c8734d35d5ce56b9060
|
[
"MIT"
] | null | null | null |
datagen.py
|
r-or/cnn-eyetrack
|
93a09f209aa8d34defc82c8734d35d5ce56b9060
|
[
"MIT"
] | null | null | null |
datagen.py
|
r-or/cnn-eyetrack
|
93a09f209aa8d34defc82c8734d35d5ce56b9060
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import sys
import os
import shutil
import argparse
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
import random
from multiprocessing import Pool
import cv2
import json
aparser = argparse.ArgumentParser()
aparser.add_argument('-tSet', help='Choose source training set (inside capture-output)')
aparser.add_argument('-name', help='Output name of augmentation')
aparser.add_argument('-valPart', help='The percentage of data to use for validation (]0 < valPart < 1[)', default=.12)
aparser.add_argument('-copy', help='Only copy, do NOT modify training set', action='store_true')
aparser.add_argument('-subSetNum', help='Choose an absolute number of samples to use', default = -1)
aparser.add_argument('-pathCap', help='Specify path to capture-output', nargs=1)
aparser.add_argument('-pathAug', help='Specify path to augmentation-output', nargs=1)
aparser.add_argument('-save', help='Save config into cfg.json', action='store_true')
args = aparser.parse_args()
if os.path.exists('cfg.json'):
with open('cfg.json', 'r') as cfgfile:
cfg = json.load(cfgfile)
else:
cfg = {}
if args.pathCap or 'capturepath' not in cfg:
cfg['capturepath'] = args.pathCap
if args.pathAug or 'augpath' not in cfg:
cfg['augpath'] = args.pathAug
if args.tSet or 'tSet' not in cfg:
cfg['tSet'] = args.tSet
if args.name or 'nameOfRun' not in cfg:
cfg['nameOfRun'] = args.name
if args.save:
with open('cfg.json', 'w') as cfgfile:
cfgfile.write(json.dumps(cfg, sort_keys=True, indent=2))
dataSet = cfg['tSet']
valPart = float(args.valPart)
subSetNum = int(args.subSetNum)
srcDirBare = os.path.join(cfg['capturepath'], dataSet)
srcDirTrain = os.path.join(cfg['augpath'], cfg['nameOfRun'] + '-train', 'images')
srcDirValidate = os.path.join(cfg['augpath'], cfg['nameOfRun'] + '-validate', 'images')
if os.path.exists(srcDirTrain):
shutil.rmtree(srcDirTrain)
if os.path.exists(srcDirValidate):
shutil.rmtree(srcDirValidate)
os.makedirs(srcDirTrain)
os.makedirs(srcDirValidate)
def randomBrightness(image):
hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
rand = random.uniform(.3, 1.0)
hsv[:, :, 2] = rand*hsv[:, :, 2]
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
return img
datagen = ImageDataGenerator(rotation_range = 0,
width_shift_range = .05,
height_shift_range = .05,
brightness_range = (.005, 3),
horizontal_flip = False,
fill_mode = 'nearest')
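# The generator above yields randomly shifted and brightness-jittered variants of
# each input image; genImg() below writes roughly 20 such variants per source file
# into the training directory.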
files = [f for f in os.listdir(srcDirBare) if f.endswith('.jpeg') or f.endswith('.jpg')]
random.shuffle(files)
if args.subSetNum != -1:
files = files[:int(args.subSetNum)]
filesTrain = files[:int(len(files) * (1 - valPart))]
filesValidate = files[int(len(files) * (1 - valPart)):]
if not args.copy:
# gen data with first part
def genImg(imgFile):
img = load_img(os.path.join(srcDirBare, imgFile))
x = img_to_array(img)
x = x.reshape((1,) + x.shape)
i = 0
for batch in datagen.flow(x,
batch_size = 1,
save_to_dir = srcDirTrain,
save_prefix = imgFile.split('.')[0],
shuffle = True,
save_format = 'jpeg'):
i += 1
if i > 20:
break
sys.stdout.write('Processing {} files... '.format(len(filesTrain)))
try:
pool = Pool(os.cpu_count())
for idx, _ in enumerate(pool.imap_unordered(genImg, filesTrain), 1):
sys.stdout.write('\b' * 6 + '{:5.2f}%'.format(100 * float(idx) / len(filesTrain)))
sys.stdout.flush()
print(' done!')
finally:
pool.close()
pool.join()
else:
sys.stdout.write('Copying {} files...'.format(len(filesTrain)))
for imgFile in filesTrain:
shutil.copy2(os.path.join(srcDirBare, imgFile), srcDirTrain)
print(' done!')
# copy rest
sys.stdout.write('Copying {} files...'.format(len(filesValidate)))
for imgFile in filesValidate:
shutil.copy2(os.path.join(srcDirBare, imgFile), srcDirValidate)
print(' done!')
| 36.598291 | 118 | 0.629379 |
4af45ccb01dd89136ca52d9a90c9fb9a3e45dac9
| 2,081 |
py
|
Python
|
metro_spider/middlewares.py
|
xujl930/crawl_metro
|
99c52f183fbf52a43847d98c2d0e666197a73287
|
[
"MIT"
] | null | null | null |
metro_spider/middlewares.py
|
xujl930/crawl_metro
|
99c52f183fbf52a43847d98c2d0e666197a73287
|
[
"MIT"
] | null | null | null |
metro_spider/middlewares.py
|
xujl930/crawl_metro
|
99c52f183fbf52a43847d98c2d0e666197a73287
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SeleniumMiddleware(object):
pass
class ProxyMiddleware(object):
def process_request(self, request, spider):
request.meta['proxy'] = '127.0.0.1:1080'
class MetroSpiderSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
| 32.015385 | 78 | 0.675156 |
c306bd1ceb02abb80fd505d65bc08f9ef261fea5
| 2,307 |
py
|
Python
|
examples/time_frequency/plot_source_space_time_frequency.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3 |
2021-01-04T08:45:56.000Z
|
2021-05-19T12:25:59.000Z
|
examples/time_frequency/plot_source_space_time_frequency.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 28 |
2020-05-07T00:58:34.000Z
|
2020-08-29T23:02:17.000Z
|
examples/time_frequency/plot_source_space_time_frequency.py
|
fmamashli/mne-python
|
52f064415e7c9fa8fe243d22108dcdf3d86505b9
|
[
"BSD-3-Clause"
] | 3 |
2019-01-28T13:48:00.000Z
|
2019-07-10T16:02:11.000Z
|
"""
===================================================
Compute induced power in the source space with dSPM
===================================================
Returns STC files ie source estimates of induced power
for different bands in the source space. The inverse method
is linear based on dSPM inverse operator.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, source_band_induced_power
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax, event_id = -0.2, 0.5, 1
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
events = events[:10] # take 10 events to keep the computation time low
# Use linear detrend to reduce any edge artifacts
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6),
preload=True, detrend=1)
# Compute a source estimate per frequency band
bands = dict(alpha=[9, 11], beta=[18, 22])
stcs = source_band_induced_power(epochs, inverse_operator, bands, n_cycles=2,
use_fft=False, n_jobs=1)
for b, stc in stcs.items():
stc.save('induced_power_%s' % b)
###############################################################################
# plot mean power
plt.plot(stcs['alpha'].times, stcs['alpha'].data.mean(axis=0), label='Alpha')
plt.plot(stcs['beta'].times, stcs['beta'].data.mean(axis=0), label='Beta')
plt.xlabel('Time (ms)')
plt.ylabel('Power')
plt.legend()
plt.title('Mean source induced power')
plt.show()
| 33.434783 | 79 | 0.625921 |
575d9e2fad526589921e6daa001d766b9256fb20
| 2,446 |
py
|
Python
|
setup.py
|
praveshtora/setup.py
|
2575104a3132355db74c35224571c00fe660b77e
|
[
"MIT"
] | null | null | null |
setup.py
|
praveshtora/setup.py
|
2575104a3132355db74c35224571c00fe660b77e
|
[
"MIT"
] | 1 |
2019-07-18T16:34:50.000Z
|
2019-07-18T16:34:50.000Z
|
setup.py
|
praveshtora/setup.py
|
2575104a3132355db74c35224571c00fe660b77e
|
[
"MIT"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
author='Simon Davy',
author_email='[email protected]',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
'Topic :: System :: Logging',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
],
description='A common WSGI stack',
entry_points=dict(
console_scripts=[
'talisker=talisker:run_gunicorn',
'talisker.run=talisker:run',
'talisker.gunicorn=talisker:run_gunicorn',
'talisker.gunicorn.eventlet=talisker:run_gunicorn_eventlet',
'talisker.gunicorn.gevent=talisker:run_gunicorn_gevent',
'talisker.celery=talisker:run_celery',
],
),
extras_require=dict(
celery=[
'celery>=3.1.13.0,<5.0',
],
dev=[
'logging_tree>=1.7',
'pygments>=2.2',
'psutil>=5.0',
'objgraph>=3.0',
],
django=[
'django>=1.10, <2.3',
],
flask=[
'flask>=0.11,<2.0',
'blinker>=1.4,<2.0',
],
pg=[
'sqlparse',
'psycopg2',
],
prometheus=[
'prometheus-client>=0.2.0,<0.5.0' + ',!=0.4.0,!=0.4.1',
],
),
include_package_data=True,
    install_requires=[
        'pycryptodome==3.7.3',
        'gunicorn>=19.7.0,<20.0',
        'Werkzeug>=0.11.5,<0.15',
        'statsd>=3.2.1,<4.0',
        'requests>=2.10.0,<3.0',
        'raven>=5.27.1,<7.0',
        'future>=0.15.2,<0.17',
        'ipaddress>=1.0.16,<2.0;python_version<"3.3"',
    ],
keywords=[
'talisker',
],
name='talisker',
package_data=dict(
talisker=[
'logstash/*',
],
),
package_dir=dict(
talisker='talisker',
),
packages=[
'talisker',
],
test_suite='tests',
url='https://github.com/canonical-ols/talisker',
version='0.9.16',
zip_safe=False,
)
| 29.829268 | 235 | 0.52453 |
9f1d35be0bf43fc73d090e84f418ac6bc9d8f663
| 2,253 |
py
|
Python
|
tutorials_for_myself/oop_python/simple_super.py
|
pestun/ultimate-utils
|
676002e80422067256c43172a78825ed12954bcb
|
[
"MIT"
] | 5 |
2021-03-13T16:07:26.000Z
|
2021-09-09T17:00:36.000Z
|
tutorials_for_myself/oop_python/simple_super.py
|
pestun/ultimate-utils
|
676002e80422067256c43172a78825ed12954bcb
|
[
"MIT"
] | 8 |
2021-03-09T21:52:09.000Z
|
2021-12-02T17:23:33.000Z
|
tutorials_for_myself/oop_python/simple_super.py
|
pestun/ultimate-utils
|
676002e80422067256c43172a78825ed12954bcb
|
[
"MIT"
] | 5 |
2021-03-24T20:38:43.000Z
|
2022-03-17T07:54:12.000Z
|
#%%
"""
super: https://stackoverflow.com/questions/222877/what-does-super-do-in-python-difference-between-super-init-and-expl, https://realpython.com/python-super/#an-overview-of-pythons-super-function
subclass = if Child is a subclass of SomeBaseClass then the code from SomeBaseClass will be given to Child. e.g. a Square (more specific) is a subclass of a Rectangle (usually more general). https://www.codesdope.com/course/python-subclass-of-a-class/
super - basics with single inheritance: https://realpython.com/python-super/#a-super-deep-dive
- super(ChildClassName, self[instance of ChildClass]) can take two inputs the first the subclass & the second a specific instance of the subclass.
- By including an instantiated object, super() returns a bound method: a method that is bound to the object, which gives the method the object’s context such as any instance attributes.
In Python 3, the super(Square, self) call is equivalent to the parameterless super() call.
Goal: understand super(_BatchNorm, self).__init__(...)
"""
class Square:
def __init__(self, side: float):
self.side = side
def area(self) -> float:
return self.side**2
class Cube(Square):
def surface_area(self) -> float:
# same as super(Square, self) in python3
area_one_face: float = super().area()
return area_one_face * 6
    def volume(self):
        # face_area = super(Cube, self).area()  # equivalent python 2 style call
        face_area = super().area()
        # Cube only stores `side` (inherited from Square), so use it as the height
        return face_area * self.side
def _update_side(self, side: int) -> None:
# super(Cube, self).__init__(side=side) # for python 2
super().__init__(side=side)
        assert(self.side == side), f'Should have updated the side to {side} but it is: {self.side}'
c1: Cube = Cube(3)
print(c1.side)
c1._update_side(4)
print(c1.side)
#%%
class Rectangle:
def __init__(self, length, width):
self.length = length
self.width = width
def area(self):
return self.length * self.width
def perimeter(self):
return 2 * self.length + 2 * self.width
# Here we declare that the Square class inherits from the Rectangle class
class Square(Rectangle):
def __init__(self, length):
super().__init__(length, length)
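# Quick illustrative check (not in the original tutorial): Square(4) delegates to
# Rectangle.__init__(4, 4) via super(), so both methods below come from Rectangle:
#   sq = Square(4)
#   assert sq.area() == 16 and sq.perimeter() == 16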
| 35.203125 | 251 | 0.691966 |
2d27ad7534a22939c1e34421115867179093f82b
| 848 |
py
|
Python
|
setup.py
|
NalbertLeal/pyloading
|
dd9f9788c961abb1d669dade8603a08be18053d9
|
[
"Apache-2.0"
] | 1 |
2022-03-23T15:17:07.000Z
|
2022-03-23T15:17:07.000Z
|
setup.py
|
NalbertLeal/pyloading
|
dd9f9788c961abb1d669dade8603a08be18053d9
|
[
"Apache-2.0"
] | 2 |
2022-03-23T15:27:36.000Z
|
2022-03-23T15:47:13.000Z
|
setup.py
|
NalbertLeal/pyloading
|
dd9f9788c961abb1d669dade8603a08be18053d9
|
[
"Apache-2.0"
] | 1 |
2022-03-23T15:35:16.000Z
|
2022-03-23T15:35:16.000Z
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="pyloading_bar",
packages=['pyloading_bar'],
version="0.0.2",
author="Nalbert Gabriel Melo Leal",
author_email="[email protected]",
description="A python loading bar to use in terminal applications",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/NalbertLeal/pyloading_bar",
project_urls={
"Bug Tracker": "https://github.com/NalbertLeal/pyloading_bar/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
],
python_requires=">=3.8",
)
| 31.407407 | 73 | 0.704009 |
51647238d25382bd642264723a4136102b0d9e79
| 1,109 |
py
|
Python
|
admin_resumable/conf.py
|
fdemmer/django-admin-resumable-js
|
8a99bd6c1757647d453df08b0391270b0007bb92
|
[
"MIT"
] | null | null | null |
admin_resumable/conf.py
|
fdemmer/django-admin-resumable-js
|
8a99bd6c1757647d453df08b0391270b0007bb92
|
[
"MIT"
] | null | null | null |
admin_resumable/conf.py
|
fdemmer/django-admin-resumable-js
|
8a99bd6c1757647d453df08b0391270b0007bb92
|
[
"MIT"
] | 1 |
2019-01-28T18:50:42.000Z
|
2019-01-28T18:50:42.000Z
|
# -*- coding: utf-8 -*-
from django.conf import settings
# resumable.js settings
# chunkSize
ADMIN_RESUMABLE_CHUNKSIZE = getattr(
settings,
'ADMIN_RESUMABLE_CHUNKSIZE',
'1*1024*1024'
)
# simultaneousUploads
ADMIN_RESUMABLE_PARALLEL = getattr(
settings,
'ADMIN_RESUMABLE_PARALLEL',
3
)
# prioritizeFirstAndLastChunk
ADMIN_RESUMABLE_FIRSTLAST = getattr(
settings,
'ADMIN_RESUMABLE_FIRSTLAST',
False
)
# maxChunkRetries
ADMIN_RESUMABLE_RETRIES = getattr(
settings,
'ADMIN_RESUMABLE_RETRIES',
None
)
# widget settings
ADMIN_RESUMABLE_SHOW_THUMB = getattr(
settings,
'ADMIN_RESUMABLE_SHOW_THUMB',
False
)
# others
ADMIN_RESUMABLE_CHUNKSUFFIX = getattr(
settings,
'ADMIN_RESUMABLE_CHUNKSUFFIX',
'_part_'
)
# put final filesize as prefix in filename
ADMIN_RESUMABLE_SIZE_PREFIX = getattr(
settings,
'ADMIN_RESUMABLE_SIZE_PREFIX',
True
)
# default storage class for ModelAdminResumableFileField
ADMIN_RESUMABLE_STORAGE = getattr(
settings,
'ADMIN_RESUMABLE_STORAGE',
'django.core.files.storage.FileSystemStorage'
)
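# Example override from a project's settings.py (values purely illustrative):
#   ADMIN_RESUMABLE_CHUNKSIZE = '5*1024*1024'
#   ADMIN_RESUMABLE_PARALLEL = 2
#   ADMIN_RESUMABLE_SHOW_THUMB = True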
| 19.12069 | 56 | 0.743012 |
63959a844e63ec3f482799fcf7318a38acaba918
| 6,041 |
py
|
Python
|
nltk/classify/scikitlearn.py
|
dreamhost/nltk
|
7f1fad731d1bbaa9334a665221d68b86d8164dce
|
[
"Apache-2.0"
] | 8 |
2015-10-09T03:37:18.000Z
|
2019-06-02T19:32:40.000Z
|
nltk/classify/scikitlearn.py
|
dreamhost/nltk
|
7f1fad731d1bbaa9334a665221d68b86d8164dce
|
[
"Apache-2.0"
] | null | null | null |
nltk/classify/scikitlearn.py
|
dreamhost/nltk
|
7f1fad731d1bbaa9334a665221d68b86d8164dce
|
[
"Apache-2.0"
] | 2 |
2017-06-02T12:07:23.000Z
|
2020-05-20T11:36:09.000Z
|
# Natural Language Toolkit: Interface to scikit-learn classifiers
#
# Author: Lars Buitinck <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
scikit-learn (http://scikit-learn.org) is a machine learning library for
Python, supporting most of the basic classification algorithms, including SVMs,
Naive Bayes, logistic regression and decision trees.
This package implements a wrapper around scikit-learn classifiers. To use this
wrapper, construct a scikit-learn classifier, then use that to construct a
SklearnClassifier. E.g., to wrap a linear SVM classifier with default settings,
do
>>> from sklearn.svm.sparse import LinearSVC
>>> from nltk.classify.scikitlearn import SklearnClassifier
>>> classif = SklearnClassifier(LinearSVC())
The scikit-learn classifier may be arbitrarily complex. E.g., the following
constructs and wraps a Naive Bayes estimator with tf-idf weighting and
chi-square feature selection:
>>> from sklearn.feature_extraction.text import TfidfTransformer
>>> from sklearn.feature_selection import SelectKBest, chi2
>>> from sklearn.naive_bayes import MultinomialNB
>>> from sklearn.pipeline import Pipeline
>>> pipeline = Pipeline([('tfidf', TfidfTransformer()),
... ('chi2', SelectKBest(chi2, k=1000)),
... ('nb', MultinomialNB())])
>>> classif = SklearnClassifier(pipeline)
(Such a classifier could be trained on word counts for text classification.)
"""
from nltk.classify.api import ClassifierI
from nltk.probability import DictionaryProbDist
import numpy as np
from scipy.sparse import coo_matrix
class SklearnClassifier(ClassifierI):
"""Wrapper for scikit-learn classifiers."""
def __init__(self, estimator, dtype=float, sparse=True):
"""
:param estimator: scikit-learn classifier object.
:param dtype: data type used when building feature array.
scikit-learn estimators work exclusively on numeric data; use bool
when all features are binary.
:param sparse: Whether to use sparse matrices. The estimator must
support these; not all scikit-learn classifiers do. The default
value is True, since most NLP problems involve sparse feature sets.
:type sparse: boolean.
"""
self._clf = estimator
self._dtype = dtype
self._sparse = sparse
def __repr__(self):
return "<SklearnClassifier(%r)>" % self._clf
def batch_classify(self, featuresets):
X = self._convert(featuresets)
y = self._clf.predict(X)
return [self._index_label[int(yi)] for yi in y]
def batch_prob_classify(self, featuresets):
X = self._convert(featuresets)
y_proba = self._clf.predict_proba(X)
return [self._make_probdist(y_proba[i]) for i in xrange(len(y_proba))]
def labels(self):
return self._label_index.keys()
def train(self, labeled_featuresets):
"""
Train (fit) the scikit-learn estimator.
:param labeled_featuresets: A list of classified featuresets,
i.e., a list of tuples ``(featureset, label)``.
"""
self._feature_index = {}
self._index_label = []
self._label_index = {}
for fs, label in labeled_featuresets:
for f in fs.iterkeys():
if f not in self._feature_index:
self._feature_index[f] = len(self._feature_index)
if label not in self._label_index:
self._index_label.append(label)
self._label_index[label] = len(self._label_index)
featuresets, labels = zip(*labeled_featuresets)
X = self._convert(featuresets)
y = np.array([self._label_index[l] for l in labels])
self._clf.fit(X, y)
return self
def _convert(self, featuresets):
if self._sparse:
return self._featuresets_to_coo(featuresets)
else:
return self._featuresets_to_array(featuresets)
def _featuresets_to_coo(self, featuresets):
"""Convert featuresets to sparse matrix (COO format)."""
i_ind = []
j_ind = []
values = []
for i, fs in enumerate(featuresets):
for f, v in fs.iteritems():
try:
j = self._feature_index[f]
i_ind.append(i)
j_ind.append(j)
values.append(self._dtype(v))
except KeyError:
pass
shape = (i + 1, len(self._feature_index))
return coo_matrix((values, (i_ind, j_ind)), shape=shape, dtype=self._dtype)
def _featuresets_to_array(self, featuresets):
"""Convert featureset to Numpy array."""
X = np.zeros((len(featuresets), len(self._feature_index)),
dtype=self._dtype)
for i, fs in enumerate(featuresets):
for f, v in fs.iteritems():
try:
X[i, self._feature_index[f]] = self._dtype(v)
except KeyError: # feature not seen in training
pass
return X
def _make_probdist(self, y_proba):
return DictionaryProbDist(dict((self._index_label[i], p)
for i, p in enumerate(y_proba)))
if __name__ == "__main__":
from nltk.classify.util import names_demo, binary_names_demo_features
try:
from sklearn.linear_model.sparse import LogisticRegression
except ImportError: # separate sparse LR to be removed in 0.12
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import BernoulliNB
print("scikit-learn Naive Bayes:")
names_demo(SklearnClassifier(BernoulliNB(binarize=False), dtype=bool).train,
features=binary_names_demo_features)
print("scikit-learn logistic regression:")
names_demo(SklearnClassifier(LogisticRegression(), dtype=np.float64).train,
features=binary_names_demo_features)
| 36.173653 | 83 | 0.648734 |
66ba492b520dcc520efaf310c157c0fba6d559c7
| 985 |
py
|
Python
|
recurce_13/p_part_sort.py
|
master-cim/algorithm
|
a57f473ceb32b96240989e31ac33154e55c00724
|
[
"MIT"
] | 1 |
2022-03-31T07:30:53.000Z
|
2022-03-31T07:30:53.000Z
|
recurce_13/p_part_sort.py
|
master-cim/algorithm
|
a57f473ceb32b96240989e31ac33154e55c00724
|
[
"MIT"
] | null | null | null |
recurce_13/p_part_sort.py
|
master-cim/algorithm
|
a57f473ceb32b96240989e31ac33154e55c00724
|
[
"MIT"
] | 2 |
2022-03-04T09:42:03.000Z
|
2022-03-30T14:51:32.000Z
|
# P. Partial sorting
# ID of the accepted submission
def subtraction_items(square):
while square != []:
i = square.pop()
for item in square:
yield item - i
def partial_sorting(numb, arr):
pr_less_bf = 0
pr_big_bf = 0
step = len(arr) - 1
zerr_ar = 1
print(arr)
print(sorted(arr))
print(arr[0], arr[1], arr[2], arr[3])
while arr[step] != 0:
if arr[step] < arr[step-1] and arr[step-1] == 0:
pr_less_bf += 1
step -= 1
elif arr[step] > arr[step-1]:
pr_big_bf += 1
step -= 1
elif arr[step] == 0:
zerr_ar = 0
else:
break
# print(pr_less_bf, pr_big_bf, zerr_ar)
# print(pr_less_bf+pr_big_bf+zerr_ar)
def read_input():
numb = int(input())
arr = [int(element) for element in input().strip().split()]
return numb, arr
if __name__ == '__main__':
numb, arr = read_input()
partial_sorting(numb, arr)
| 22.386364 | 63 | 0.543147 |
ead2b7fa59348effa8ed51f4b676af53e1b3afe9
| 1,056 |
py
|
Python
|
demo/admin.py
|
peopledoc/django-agnocomplete
|
54cf0d2e63be31fef8d3220815d6e7c4d8594896
|
[
"MIT"
] | 10 |
2017-12-11T23:33:00.000Z
|
2022-01-24T05:28:48.000Z
|
demo/admin.py
|
peopledoc/django-agnocomplete
|
54cf0d2e63be31fef8d3220815d6e7c4d8594896
|
[
"MIT"
] | 26 |
2017-11-21T08:55:29.000Z
|
2022-02-04T13:44:27.000Z
|
demo/admin.py
|
peopledoc/django-agnocomplete
|
54cf0d2e63be31fef8d3220815d6e7c4d8594896
|
[
"MIT"
] | 2 |
2018-01-10T09:06:49.000Z
|
2021-07-11T10:24:23.000Z
|
from django.contrib import admin
from django import forms
from agnocomplete import fields
from .models import Person, FavoriteColor, Tag, PersonTag
# Below, importing unused class from autocomplete.py to trigger
# the registering of agnocomplete models
from .autocomplete import logger
__all__ = ['logger']
class PersonAdmin(admin.ModelAdmin):
pass
# Autocomplete for FavoriteColor admin
class FavoriteColorModelForm(forms.ModelForm):
person = fields.AgnocompleteModelField('AutocompletePerson')
class Meta:
fields = ('color', 'person')
model = FavoriteColor
class FavoriteColorAdmin(admin.ModelAdmin):
form = FavoriteColorModelForm
class Media:
css = {
'screen': ('css/admin.css', 'css/selectize.css',)
}
js = (
'js/jquery.js',
'js/selectize.js',
'js/demo/selectize.js',
)
admin.site.register(Person, PersonAdmin)
admin.site.register(FavoriteColor, FavoriteColorAdmin)
admin.site.register(Tag)
admin.site.register(PersonTag)
| 22.956522 | 64 | 0.696023 |
8868bc582e7ba866584143c55579d7fea66793de
| 2,193 |
py
|
Python
|
Gradient_descent/gradient.py
|
sadeedbari/DeepLearningNanodegreeU
|
f0808de900a2141bd1c181cb511c2e909b2ca959
|
[
"MIT"
] | null | null | null |
Gradient_descent/gradient.py
|
sadeedbari/DeepLearningNanodegreeU
|
f0808de900a2141bd1c181cb511c2e909b2ca959
|
[
"MIT"
] | null | null | null |
Gradient_descent/gradient.py
|
sadeedbari/DeepLearningNanodegreeU
|
f0808de900a2141bd1c181cb511c2e909b2ca959
|
[
"MIT"
] | null | null | null |
import numpy as np
from data_prep import features, targets, features_test, targets_test
def sigmoid(x):
"""
Calculate sigmoid
"""
return 1 / (1 + np.exp(-x))
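# Note: sigmoid'(h) = sigmoid(h) * (1 - sigmoid(h)), which is why the error term
# in the loop below multiplies the error by output * (1 - output) rather than
# calling a separate sigmoid_prime function.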
# TODO: We haven't provided the sigmoid_prime function like we did in
# the previous lesson to encourage you to come up with a more
# efficient solution. If you need a hint, check out the comments
# in solution.py from the previous lecture.
# Use to same seed to make debugging easier
np.random.seed(42)
n_records, n_features = features.shape
last_loss = None
# Initialize weights
weights = np.random.normal(scale=1 / n_features**.5, size=n_features)
# Neural Network hyperparameters
epochs = 1000
learnrate = 0.5
for e in range(epochs):
del_w = np.zeros(weights.shape)
for x, y in zip(features.values, targets):
# Loop through all records, x is the input, y is the target
# Note: We haven't included the h variable from the previous
# lesson. You can add it if you want, or you can calculate
# the h together with the output
# TODO: Calculate the output
output = sigmoid(np.dot(x, weights))
# TODO: Calculate the error
error = y - output
# TODO: Calculate the error term
error_term = error * (output*(1-output))
# TODO: Calculate the change in weights for this sample
# and add it to the total weight change
del_w += error_term*x
# TODO: Update weights using the learning rate and the average change in weights
weights += learnrate * del_w / n_records
# Printing out the mean square error on the training set
if e % (epochs / 10) == 0:
out = sigmoid(np.dot(features, weights))
loss = np.mean((out - targets) ** 2)
if last_loss and last_loss < loss:
print("Train loss: ", loss, " WARNING - Loss Increasing")
else:
print("Train loss: ", loss)
last_loss = loss
# Calculate accuracy on test data
tes_out = sigmoid(np.dot(features_test, weights))
predictions = tes_out > 0.5
accuracy = np.mean(predictions == targets_test)
print("Prediction accuracy: {:.3f}".format(accuracy))
| 31.328571 | 84 | 0.653899 |
92033d18a9390ed1e090f18483603009a3c5eb69
| 1,341 |
py
|
Python
|
ads/type_discovery/unknown_detector.py
|
oracle/accelerated-data-science
|
d594ed0c8c1365daf4cf9e860daebc760fa9a24b
|
[
"UPL-1.0",
"Apache-2.0"
] | 20 |
2022-02-22T19:07:09.000Z
|
2022-03-16T17:21:42.000Z
|
ads/type_discovery/unknown_detector.py
|
oracle/accelerated-data-science
|
d594ed0c8c1365daf4cf9e860daebc760fa9a24b
|
[
"UPL-1.0",
"Apache-2.0"
] | null | null | null |
ads/type_discovery/unknown_detector.py
|
oracle/accelerated-data-science
|
d594ed0c8c1365daf4cf9e860daebc760fa9a24b
|
[
"UPL-1.0",
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8; -*-
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
from __future__ import print_function, absolute_import, division
import pandas as pd
from ads.type_discovery import logger
from ads.type_discovery.abstract_detector import AbstractTypeDiscoveryDetector
from ads.type_discovery.typed_feature import (
UnknownTypedFeature,
CategoricalTypedFeature,
)
class UnknownDetector(AbstractTypeDiscoveryDetector):
def discover(self, name, series):
candidate = series.loc[~series.isnull()].iloc[0]
if series.dtype == "object":
#
# if we got all through all the other detectors and it's a string type of feature then we
# just call it a high dimensional categorical
#
return CategoricalTypedFeature.build(name, series)
else:
logger.debug(
"type discovery on column [{}]/[{}] result is Unknown".format(
name, series.dtype
)
)
return UnknownTypedFeature.build(name, series)
if __name__ == "__main__":
dd = UnknownDetector()
print(dd.discover("unknown", pd.Series([None, "94065", "90210", None])))
| 31.928571 | 104 | 0.658464 |
4fc8dd12a56ed039f4beffc8bc385b6f213179fa
| 606 |
py
|
Python
|
djangoum3/usuarios/forms.py
|
souluanf/django-essencial
|
95d4d6d85bbe590ae95c3b26fb9577b97c2f5e7c
|
[
"MIT"
] | null | null | null |
djangoum3/usuarios/forms.py
|
souluanf/django-essencial
|
95d4d6d85bbe590ae95c3b26fb9577b97c2f5e7c
|
[
"MIT"
] | 33 |
2020-08-04T00:46:16.000Z
|
2022-03-12T00:45:43.000Z
|
djangoum3/usuarios/forms.py
|
souluanf/django-essencial
|
95d4d6d85bbe590ae95c3b26fb9577b97c2f5e7c
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUsuario
class CustomUsuarioCreateForm(UserCreationForm):
    class Meta:
        model = CustomUsuario
        fields = ('first_name', 'last_name', 'fone')
        labels = {'username': 'Username/E-mail'}
def save(self, commit=True):
        user = super().save(commit=False)  # defer the save until email and password are set
user.set_password(self.cleaned_data['password'])
user.email = self.cleaned_data['username']
if commit:
user.save()
return user
class CustomUsuarioChangeForm(UserChangeForm):
class Meta:
model = CustomUsuario
fields = ('first_name', 'last_name', 'fone')
| 24.24 | 70 | 0.742574 |
2c57b7c23a0afe384195e7aad85cf5f32eec72db
| 11,421 |
py
|
Python
|
src/icemac/addressbook/browser/search/result/handler/export/test_xls.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | 1 |
2020-03-26T20:16:44.000Z
|
2020-03-26T20:16:44.000Z
|
src/icemac/addressbook/browser/search/result/handler/export/test_xls.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | 2 |
2020-02-21T13:04:23.000Z
|
2020-02-21T13:06:10.000Z
|
src/icemac/addressbook/browser/search/result/handler/export/test_xls.py
|
icemac/icemac.addressbook
|
6197e6e01da922feb100dd0943576523050cd703
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import xlrd
from icemac.addressbook.interfaces import IPerson
def create_person(
field_title, keyword_title, address_book, FieldFactory,
FullPersonFactory, PostalAddressFactory):
"""Create a person in the address book as needed by the tests."""
field_name = FieldFactory(
address_book, IPerson, u'Bool', field_title).__name__
person = FullPersonFactory(
address_book, u'Liebig', postal__city=u'Testhausen',
keywords=[keyword_title], **{field_name: True})
# Let's create an additional postal address which does not show up in
# the defaults export but in complete export:
PostalAddressFactory(person, zip=u'00001')
def test_xls__DefaultsExport__1(search_data, browser):
"""`DefaultsExport` can export the values of the default data as XLS."""
browser.login('visitor')
browser.keyword_search('church', 'XLS export main')
assert 'application/vnd.ms-excel' == browser.headers['Content-Type']
assert ('attachment; filename=addressbook_export.xls' ==
browser.headers['Content-Disposition'])
xls_workbook = xlrd.open_workbook(file_contents=browser.contents)
assert [u'Address book - Export'] == xls_workbook.sheet_names()
work_sheet_0 = xls_workbook.sheet_by_index(0)
assert (5, 13) == (work_sheet_0.nrows, work_sheet_0.ncols)
assert [
[u'person',
'',
'',
'',
'',
u'postal address',
'',
'',
'',
'',
u'phone number',
u'e-mail address',
u'home page address'],
[u'first name',
u'last name',
u'birth date',
u'keywords',
u'notes',
u'address prefix',
u'street',
u'city',
u'zip',
u'country',
u'number',
u'e-mail address',
u'URL'],
['',
u'Koch',
19017.0,
u'family, church',
u'father-in-law',
'',
'',
'',
'',
u'Germany',
'',
'',
''],
['',
u'Liebig',
'',
u'church',
u'family',
'',
'',
'',
'',
u'Germany',
'',
'',
''],
['',
u'Velleuer',
'',
u'family, church',
'',
'',
'',
'',
'',
u'Germany',
'',
'',
'']] == [work_sheet_0.row_values(rx)
for rx in range(work_sheet_0.nrows)]
def test_xls__DefaultsExport__2(search_data, browser):
"""`DefaultsExport` only exports the selected persons."""
browser.login('visitor')
browser.keyword_search('church')
browser.getControl('Apply on selected persons').displayValue = [
'XLS export main']
browser.getControl(
name='persons:list').getControl(value="Person-2").selected = False
browser.getControl(name='form.buttons.apply').click()
xls_workbook = xlrd.open_workbook(file_contents=browser.contents)
work_sheet_0 = xls_workbook.sheet_by_index(0)
assert (4, 13) == (work_sheet_0.nrows, work_sheet_0.ncols)
assert [
[u'person',
'',
'',
'',
'',
u'postal address',
'',
'',
'',
'',
u'phone number',
u'e-mail address',
u'home page address'],
[u'first name',
u'last name',
u'birth date',
u'keywords',
u'notes',
u'address prefix',
u'street',
u'city',
u'zip',
u'country',
u'number',
u'e-mail address',
u'URL'],
['',
u'Liebig',
'',
u'church',
u'family',
'',
'',
'',
'',
u'Germany',
'',
'',
''],
['',
u'Velleuer',
'',
u'family, church',
'',
'',
'',
'',
'',
u'Germany',
'',
'',
'']] == [work_sheet_0.row_values(rx)
for rx in range(work_sheet_0.nrows)]
def test_xls__DefaultsExport__3(
address_book, FieldFactory, FullPersonFactory, PostalAddressFactory,
browser):
"""`DefaultsExport` exports user defined fields like other fields."""
create_person(u'photo permission?', u'church', address_book, FieldFactory,
FullPersonFactory, PostalAddressFactory)
browser.login('visitor')
browser.open(browser.SEARCH_URL)
# We choose the keyword search as it has export abilities:
browser.getLink('Keyword search').click()
browser.getControl('keywords').displayValue = ['church']
browser.getControl('Search').click()
# The export produces an XLS file:
browser.getControl('Apply on selected persons').displayValue = [
'XLS export main']
browser.getControl(name='form.buttons.apply').click()
assert 'application/vnd.ms-excel' == browser.headers['Content-Type']
xls_workbook = xlrd.open_workbook(file_contents=browser.contents)
work_sheet_0 = xls_workbook.sheet_by_index(0)
assert (3, 14) == (work_sheet_0.nrows, work_sheet_0.ncols)
assert [
[u'person',
'',
'',
'',
'',
'',
u'postal address',
'',
'',
'',
'',
u'phone number',
u'e-mail address',
u'home page address'],
[u'first name',
u'last name',
u'birth date',
u'keywords',
u'notes',
u'photo permission?',
u'address prefix',
u'street',
u'city',
u'zip',
u'country',
u'number',
u'e-mail address',
u'URL'],
['',
u'Liebig',
'',
u'church',
'',
True,
'',
'',
u'Testhausen',
'',
u'Germany',
'',
'',
'']] == [work_sheet_0.row_values(rx)
for rx in range(work_sheet_0.nrows)]
def test_xls__DefaultsExport__4(
translated_address_book, FieldFactory, FullPersonFactory,
PostalAddressFactory, browser):
"""`DefaultsExport` translates field names into user's language."""
create_person(u'Fotoerlaubnis?', u'Kirche', translated_address_book,
FieldFactory, FullPersonFactory, PostalAddressFactory)
# As visitors are allowed to search and export, so we log in as a
# visitor, to enable translation, we also send an accept-language
# header:
browser.login('visitor')
browser.lang('de-DE')
browser.open(browser.SEARCH_BY_KEYWORD_URL)
browser.getControl('Schlagwörter').displayValue = ['Kirche']
browser.getControl('Suchen').click()
browser.getControl('Auf ausgewählte Personen anwenden').displayValue = [
'bevorzugte']
browser.getControl(name='form.buttons.apply').click()
assert 'application/vnd.ms-excel' == browser.headers['Content-Type']
xls_workbook = xlrd.open_workbook(file_contents=browser.contents)
work_sheet_0 = xls_workbook.sheet_by_index(0)
assert (3, 14) == (work_sheet_0.nrows, work_sheet_0.ncols)
assert [
[u'Person',
'',
'',
'',
'',
'',
u'Anschrift',
'',
'',
'',
'',
u'Telefonnummer',
u'E-Mail-Adresse',
u'Homepage-Adresse'],
[u'Vorname',
u'Familienname',
u'Geburtsdatum',
u'Schlagwörter',
u'Anmerkungen',
u'Fotoerlaubnis?',
u'Adresszusatz',
u'Straße',
u'Ort',
u'PLZ',
u'Land',
u'Nummer',
u'E-Mail-Adresse',
u'URL'],
['',
u'Liebig',
'',
u'Kirche',
'',
1,
'',
'',
u'Testhausen',
'',
u'Deutschland',
'',
'',
'']] == [work_sheet_0.row_values(rx)
for rx in range(work_sheet_0.nrows)]
def test_xls__CompleteExport__1(
translated_address_book, FieldFactory, FullPersonFactory,
PostalAddressFactory, browser):
"""`CompleteExport` translates field names into user's language."""
create_person(u'Fotoerlaubnis?', u'Kirche', translated_address_book,
FieldFactory, FullPersonFactory, PostalAddressFactory)
# The complete export is not really different from the main one:
browser.login('visitor')
browser.lang('de-DE')
browser.open(browser.SEARCH_BY_KEYWORD_URL)
browser.getControl('Schlagwörter').displayValue = ['Kirche']
browser.getControl('Suchen').click()
browser.getControl('Auf ausgewählte Personen anwenden').displayValue = [
'vollständig']
browser.getControl(name='form.buttons.apply').click()
assert 'application/vnd.ms-excel' == browser.headers['Content-Type']
xls_workbook = xlrd.open_workbook(file_contents=browser.contents)
work_sheet_0 = xls_workbook.sheet_by_index(0)
assert (3, 19) == (work_sheet_0.nrows, work_sheet_0.ncols)
assert [
[u'Person',
'',
'',
'',
'',
'',
u'bevorzugte Anschrift',
'',
'',
'',
'',
u'weitere Anschrift',
'',
'',
'',
'',
u'bevorzugte Telefonnummer',
u'bevorzugte E-Mail-Adresse',
u'bevorzugte Homepage-Adresse'],
[u'Vorname',
u'Familienname',
u'Geburtsdatum',
u'Schlagwörter',
u'Anmerkungen',
u'Fotoerlaubnis?',
u'Adresszusatz',
u'Straße',
u'Ort',
u'PLZ',
u'Land',
u'Adresszusatz',
u'Straße',
u'Ort',
u'PLZ',
u'Land',
u'Nummer',
u'E-Mail-Adresse',
u'URL'],
['',
u'Liebig',
'',
u'Kirche',
'',
1,
'',
'',
u'Testhausen',
'',
u'Deutschland',
'',
'',
'',
u'00001',
u'Deutschland',
'',
'',
'']] == [work_sheet_0.row_values(rx)
for rx in range(work_sheet_0.nrows)]
def test_xls__CompleteExport__2(search_data, browser):
"""`CompleteExport` does not fail if no person is selected."""
browser.login('visitor')
browser.keyword_search('friends')
browser.getControl('Apply on selected persons').displayValue = [
'XLS export complete']
browser.getControl(
name='persons:list').getControl(value="Person").selected = False
browser.getControl(name='form.buttons.apply').click()
# When the user chooses no person for export a nearly empty sheet gets
# exported.
xls_workbook = xlrd.open_workbook(file_contents=browser.contents)
work_sheet_0 = xls_workbook.sheet_by_index(0)
assert (2, 5) == (work_sheet_0.nrows, work_sheet_0.ncols)
assert [
[u'person', '', '', '', ''],
[u'first name',
u'last name',
u'birth date',
u'keywords',
u'notes']] == [work_sheet_0.row_values(rx)
for rx in range(work_sheet_0.nrows)]
| 28.33995 | 78 | 0.52456 |
9b062ee31ce7ce79c8035ca0bfec69b40d8fe30a
| 2,954 |
py
|
Python
|
tools/nntool/graph/matches/matchers/concat_split.py
|
mfkiwl/gap_sdk
|
642b798dfdc7b85ccabe6baba295033f0eadfcd4
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/graph/matches/matchers/concat_split.py
|
mfkiwl/gap_sdk
|
642b798dfdc7b85ccabe6baba295033f0eadfcd4
|
[
"Apache-2.0"
] | null | null | null |
tools/nntool/graph/matches/matchers/concat_split.py
|
mfkiwl/gap_sdk
|
642b798dfdc7b85ccabe6baba295033f0eadfcd4
|
[
"Apache-2.0"
] | 1 |
2021-11-11T02:12:25.000Z
|
2021-11-11T02:12:25.000Z
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from graph.types import ConcatParameters, NNEdge, SplitParameters
from utils.graph import GraphView
from ..matcher import Matcher, groups, match_name, description
LOG = logging.getLogger("nntool." + __name__)
@groups('*')
@match_name("concat_split")
@description("removes concat/split pair where all in edges on the concat match the out edges on the split")
class ConcatSplitMatch(Matcher):
def _match(self, G: GraphView, set_identity: bool = True, **kwargs) -> bool:
has_modified_graph = False
for split_node in set([node for node in G.nodes() if isinstance(node, SplitParameters)]):
in_edges = G.in_edges(split_node.name)
if len(in_edges) > 1:
continue
in_edge = in_edges[0]
if not isinstance(in_edge.from_node, ConcatParameters):
continue
concat_node = in_edge.from_node
if len(G.out_edges(concat_node.name)) > 1:
continue
if concat_node.transpose_out or split_node.transpose_in:
continue
if concat_node.axis != split_node.axis:
continue
axis = concat_node.axis
split_out_sizes = [out_shape[axis] for out_shape in split_node.out_shapes]
if len(split_out_sizes) != len(concat_node.in_dims):
continue
if not all(split_out_sizes[idx] == in_dim.shape[axis] for idx, in_dim in enumerate(concat_node.in_dims)):
continue
has_modified_graph = True
LOG.info("removing unnecessary concat/split pair %s/%s", concat_node.name, split_node.name)
concat_in_edges = G.indexed_in_edges(concat_node.name)
split_out_edges = G.indexed_out_edges(split_node.name)
G.remove(split_node)
G.remove(concat_node)
for idx, in_edge in enumerate(concat_in_edges):
for out_edge in split_out_edges[idx]:
G.add_edge(NNEdge(from_node=in_edge.from_node, from_idx=in_edge.from_idx,
to_node=out_edge.to_node, to_idx=out_edge.to_idx))
if set_identity:
self.set_identity(G)
return has_modified_graph
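# Illustration (shapes are assumptions): concat([A: (..., 4), B: (..., 6)], axis)
# feeding split(sizes=[4, 6], axis) is a no-op, so A and B get rewired directly
# to the split's consumers and both nodes are removed from the graph.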
| 44.089552 | 117 | 0.667569 |
970c5688b87800c968cdb10be770fa14220d07ad
| 13,880 |
py
|
Python
|
src/SFA.py
|
wangshansong1/SFA
|
5a838096ee33d9dc97269aba2fd95295b83ad4d8
|
[
"MIT"
] | 10 |
2016-12-01T08:11:41.000Z
|
2022-01-03T11:59:29.000Z
|
src/SFA.py
|
wangshansong1/SFA
|
5a838096ee33d9dc97269aba2fd95295b83ad4d8
|
[
"MIT"
] | 1 |
2018-10-29T04:03:11.000Z
|
2018-10-29T04:03:11.000Z
|
src/SFA.py
|
Bollegala/SFA
|
5a838096ee33d9dc97269aba2fd95295b83ad4d8
|
[
"MIT"
] | 1 |
2018-11-01T01:36:51.000Z
|
2018-11-01T01:36:51.000Z
|
"""
Peform Spectral Feature Alignment for Cross-Domain Sentiment Classification.
@inproceedings{Pan:WWW:2010,
Author = {Sinno Jialin Pan and Xiaochuan Ni and Jian-Tao Sun and Qiang Yang and Zheng Chen},
Booktitle = {WWW 2010},
Title = {Cross-Domain Sentiment Classification via Spectral Feature Alignment},
Year = {2010}}
Danushka Bollegala.
2013/09/25
"""
import sys
import math
import numpy as np
import scipy.io as sio
import scipy.sparse as sp
from sparsesvd import sparsesvd
import subprocess
import features
def trainLBFGS(train_file, model_file):
"""
    Train a logistic-regression model with L-BFGS (classias) on train_file and
    write it to model_file. Returns the subprocess return code.
"""
retcode = subprocess.call(
"classias-train -tb -a lbfgs.logistic -pc1=0 -pc2=1 -m %s %s > /dev/null" %\
(model_file, train_file), shell=True)
return retcode
def testLBFGS(test_file, model_file):
"""
Evaluate on the test file.
Read the output file and return the classification accuracy.
"""
output = "../work/output"
retcode = subprocess.call("cat %s | classias-tag -m %s -t > %s" %\
(test_file, model_file, output), shell=True)
F = open(output)
accuracy = 0
correct = 0
total = 0
for line in F:
if line.startswith("Accuracy"):
p = line.strip().split()
accuracy = float(p[1])
F.close()
return accuracy
def generateFeatureVectors(domain):
"""
Create feature vectors for each review in the domain.
"""
FeatGen = features.FEATURE_GENERATOR()
for (mode, label) in [("train", "positive"), ("train", "negative"), ("train", "unlabeled"),
("test", "positive"), ("test", "negative")]:
fname = "../reviews/%s-data/%s/%s.tagged" % (mode, domain, label)
fvects = FeatGen.process_file(fname, label)
writeFeatureVectorsToFile(fvects, "../work/%s/%s.%s" % (domain, mode, label))
pass
def writeFeatureVectorsToFile(fvects, fname):
"""
Write each feature vector in fvects in a single line in fname.
"""
F = open(fname, 'w')
for e in fvects:
for w in e[1].keys():
F.write("%s " % w)
F.write("\n")
F.close()
pass
def getCounts(S, M, fname):
"""
Get the feature co-occurrences in the file fname and append
those to the dictionary M. We only consider features in S.
"""
count = 0
F = open(fname)
for line in F:
count += 1
#if count > 1000:
# break
allP = line.strip().split()
p = []
for w in allP:
if w in S:
p.append(w)
n = len(p)
for i in range(0,n):
for j in range(i + 1, n):
pair = (p[i], p[j])
rpair = (p[j], p[i])
if pair in M:
M[pair] += 1
elif rpair in M:
M[rpair] += 1
else:
M[pair] = 1
F.close()
pass
def getVocab(S, fname):
"""
Get the frequency of each feature in the file named fname.
"""
F = open(fname)
for line in F:
p = line.strip().split()
for w in p:
S[w] = S.get(w, 0) + 1
F.close()
pass
def selectTh(h, t):
"""
Select all elements of the dictionary h with frequency greater than t.
"""
p = {}
for (key, val) in h.iteritems():
if val > t:
p[key] = val
del(h)
return p
def getVal(x, y, M):
"""
Returns the value of the element (x,y) in M.
"""
if (x,y) in M:
return M[(x,y)]
elif (y,x) in M:
return M[(y,x)]
else:
return 0
pass
def createMatrix(source, target):
"""
Read the unlabeled data (test and train) for both source and the target domains.
    Compute the full co-occurrence matrix. Drop co-occurrence pairs below a specified
    minimum threshold. For a feature w, compute its score(w),
score(w) = {\sum_{x \in S} pmi(w, x)} + {\sum_{y \in T} pmi(w, y)}
and sort the features in the descending order of their scores.
Write the co-occurrence matrix to a file with name source-target.cooc (fid, fid, cooc) and the
scores to a file with name source-target.pmi (feat, fid, score).
"""
# Parameters
domainTh = {'books':20, 'dvd':100, 'kitchen':20, 'electronics':20}
SourceFreqTh = domainTh[source]
TargetFreqTh = domainTh[target]
coocTh = 5
noPivots = 500
print "Source = %s, Target = %s" % (source, target)
# Get the set of source domain features.
S = {}
getVocab(S, "../work/%s/train.positive" % source)
getVocab(S, "../work/%s/train.negative" % source)
getVocab(S, "../work/%s/train.unlabeled" % source)
print "Total source features =", len(S)
# Remove source domain features with total frequency less than SourceFreqTh
S = selectTh(S, SourceFreqTh)
print "After thresholding at %d we have = %d" % (SourceFreqTh, len(S))
# Get the set of target domain features.
T = {}
getVocab(T, "../work/%s/train.positive" % target) # labels not used.
getVocab(T, "../work/%s/train.negative" % target) # labels not used.
getVocab(T, "../work/%s/train.unlabeled" % target)
print "Total target features =", len(T)
# Remove target domain features with total frequency less than TargetFreqTh
T = selectTh(T, TargetFreqTh)
print "After thresholding at %d we have = %d" % (TargetFreqTh, len(T))
# Get the union (and total frequency in both domains) for all features.
V = S.copy()
for w in T:
V[w] = S.get(w, 0) + T[w]
# Compute the co-occurrences of features in reviews
M = {}
print "Vocabulary size =", len(V)
getCounts(V, M, "../work/%s/train.positive" % source)
print "%s positive %d" % (source, len(M))
getCounts(V, M, "../work/%s/train.negative" % source)
print "%s negative %d" % (source, len(M))
getCounts(V, M, "../work/%s/train.unlabeled" % source)
print "%s unlabeled %d" % (source, len(M))
getCounts(V, M, "../work/%s/train.positive" % target)
print "%s positive %d" % (target, len(M))
getCounts(V, M, "../work/%s/train.negative" % target)
print "%s negative %d" % (target, len(M))
getCounts(V, M, "../work/%s/train.unlabeled" % target)
print "%s unlabeled %d" % (target, len(M))
# Remove co-occurrence less than the coocTh
M = selectTh(M, coocTh)
# Compute the intersection of source and target domain features.
pivots = set(S.keys()).intersection(set(T.keys()))
print "Total no. of pivots =", len(pivots)
# Compute PMI scores for pivots.
C = {}
N = sum(V.values())
i = 0
for pivot in pivots:
C[pivot] = 0.0
i += 1
for w in S:
val = getVal(pivot, w, M)
C[pivot] += 0 if (val < coocTh) else getPMI(val, V[w], V[pivot], N)
for w in T:
val = getVal(pivot, w, M)
C[pivot] += 0 if (val < coocTh) else getPMI(val, V[w], V[pivot], N)
if i % 500 == 0:
print "%d: pivot = %s, MI = %.4g" % (i, pivot, C[pivot])
pivotList = C.items()
pivotList.sort(lambda x, y: -1 if x[1] > y[1] else 1)
# write pivots to a file.
pivotsFile = open("../work/%s-%s/DI_list" % (source, target), 'w')
DI = []
for (i, (w, v)) in enumerate(pivotList[:noPivots]):
pivotsFile.write("%d %s P %s\n" % (i+1, w, str(v)))
DI.append(w)
pivotsFile.close()
DSwords = set(S.keys()).union(set(T.keys())) - pivots
DSList = list(DSwords)
print "Total no. of domain specific features =", len(DSList)
# Domain specific feature list.
DSFile = open("../work/%s-%s/DS_list" % (source, target), 'w')
count = 0
for w in DSList:
count += 1
DSFile.write("%d %s\n" % (count, w))
DSFile.close()
nDS = len(DSList)
nDI = len(DI)
# Compute matrix DSxSI and save it.
R = np.zeros((nDS, nDI), dtype=np.float)
for i in range(0, nDS):
for j in range(0, nDI):
val = getVal(DSList[i], DI[j], M)
if val > coocTh:
R[i,j] = val
print "Writing DSxDI.mat...",
sio.savemat("../work/%s-%s/DSxDI.mat" % (source, target), {'DSxDI':R})
print "Done"
pass
def getPMI(n, x, y, N):
"""
Compute the weighted PMI value.
"""
pmi = math.log((float(n) * float(N)) / (float(x) * float(y)))
res = pmi * (float(n) / float(N))
return 0 if res < 0 else res
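# Worked example with hypothetical counts: n=10 co-occurrences, marginals x=100
# and y=50, corpus size N=10000 give pmi = log((10*10000)/(100*50)) = log(20)
# ~= 3.0, and the returned weighted value is 3.0 * (10/10000) ~= 0.003.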
def generateAll():
"""
Generate matrices for all pairs of domains.
"""
domains = ["books", "electronics", "dvd", "kitchen"]
for source in domains:
for target in domains:
if source == target:
continue
createMatrix(source, target)
pass
def learnProjection(sourceDomain, targetDomain):
"""
Learn the projection matrix and store it to a file.
"""
h = 50 # no. of latent dimensions.
print "Loading the bipartite matrix...",
coocData = sio.loadmat("../work/%s-%s/DSxDI.mat" % (sourceDomain, targetDomain))
M = sp.lil_matrix(coocData['DSxDI'])
(nDS, nDI) = M.shape
print "Done."
print "Computing the Laplacian...",
D1 = sp.lil_matrix((nDS, nDS), dtype=np.float64)
D2 = sp.lil_matrix((nDI, nDI), dtype=np.float64)
for i in range(0, nDS):
D1[i,i] = 1.0 / np.sqrt(np.sum(M[i,:].data[0]))
for i in range(0, nDI):
D2[i,i] = 1.0 / np.sqrt(np.sum(M[:,i].T.data[0]))
B = (D1.tocsr().dot(M.tocsr())).dot(D2.tocsr())
print "Done."
print "Computing SVD...",
ut, s, vt = sparsesvd(B.tocsc(), h)
sio.savemat("../work/%s-%s/proj.mat" % (sourceDomain, targetDomain), {'proj':ut.T})
print "Done."
pass
def evaluate_SA(source, target, project):
"""
Report the cross-domain sentiment classification accuracy.
"""
gamma = 1.0
print "Source Domain", source
print "Target Domain", target
if project:
print "Projection ON", "Gamma = %f" % gamma
else:
print "Projection OFF"
# Load the projection matrix.
M = sp.csr_matrix(sio.loadmat("../work/%s-%s/proj.mat" % (source, target))['proj'])
(nDS, h) = M.shape
# Load the domain specific features.
DSfeat = {}
DSFile = open("../work/%s-%s/DS_list" % (source, target))
for line in DSFile:
p = line.strip().split()
DSfeat[p[1].strip()] = int(p[0])
DSFile.close()
# write train feature vectors.
trainFileName = "../work/%s-%s/trainVects.SFA" % (source, target)
testFileName = "../work/%s-%s/testVects.SFA" % (source, target)
featFile = open(trainFileName, 'w')
count = 0
for (label, fname) in [(1, 'train.positive'), (-1, 'train.negative')]:
F = open("../work/%s/%s" % (source, fname))
for line in F:
count += 1
#print "Train ", count
words = set(line.strip().split())
# write the original features.
featFile.write("%d " % label)
x = sp.lil_matrix((1, nDS), dtype=np.float64)
for w in words:
#featFile.write("%s:1 " % w)
if w in DSfeat:
x[0, DSfeat[w] - 1] = 1
# write projected features.
if project:
y = x.tocsr().dot(M)
for i in range(0, h):
featFile.write("proj_%d:%f " % (i, gamma * y[0,i]))
featFile.write("\n")
F.close()
featFile.close()
# write test feature vectors.
featFile = open(testFileName, 'w')
count = 0
for (label, fname) in [(1, 'test.positive'), (-1, 'test.negative')]:
F = open("../work/%s/%s" % (target, fname))
for line in F:
count += 1
#print "Test ", count
words = set(line.strip().split())
# write the original features.
featFile.write("%d " % label)
x = sp.lil_matrix((1, nDS), dtype=np.float64)
for w in words:
#featFile.write("%s:1 " % w)
if w in DSfeat:
x[0, DSfeat[w] - 1] = 1
# write projected features.
if project:
y = x.dot(M)
for i in range(0, h):
featFile.write("proj_%d:%f " % (i, gamma * y[0,i]))
featFile.write("\n")
F.close()
featFile.close()
# Train using classias.
modelFileName = "../work/%s-%s/model.SFA" % (source, target)
trainLBFGS(trainFileName, modelFileName)
# Test using classias.
acc = testLBFGS(testFileName, modelFileName)
print "Accuracy =", acc
print "###########################################\n\n"
return acc
def batchEval():
"""
Evaluate on all 12 domain pairs.
"""
resFile = open("../work/batchSFA.csv", "w")
domains = ["books", "electronics", "dvd", "kitchen"]
resFile.write("Source, Target, NoProj, Proj\n")
for source in domains:
for target in domains:
if source == target:
continue
createMatrix(source, target)
learnProjection(source, target)
resFile.write("%s, %s, %f, %f\n" % (source, target,
evaluate_SA(source, target, False), evaluate_SA(source, target, True)))
resFile.flush()
resFile.close()
pass
if __name__ == "__main__":
source = "books"
target = "dvd"
#generateFeatureVectors("books")
#generateFeatureVectors("dvd")
#generateFeatureVectors("electronics")
#generateFeatureVectors("kitchen")
#generateAll()
#createMatrix(source, target)
#learnProjection(source, target)
#evaluate_SA(source, target, False)
#evaluate_SA(source, target, True)
batchEval()
| 32.429907 | 99 | 0.555908 |
9997540152d31940f55fa2c17dae9fbc9020b34f
| 1,030 |
py
|
Python
|
project_test/tests/conftest.py
|
xbello/crispy-forms-foundation
|
b66e7bf516f158539430b988ba22f2d2e0cb261d
|
[
"MIT"
] | null | null | null |
project_test/tests/conftest.py
|
xbello/crispy-forms-foundation
|
b66e7bf516f158539430b988ba22f2d2e0cb261d
|
[
"MIT"
] | null | null | null |
project_test/tests/conftest.py
|
xbello/crispy-forms-foundation
|
b66e7bf516f158539430b988ba22f2d2e0cb261d
|
[
"MIT"
] | null | null | null |
"""
Some fixture methods
"""
import os
import pytest
from crispy_forms.helper import FormHelper
from project_test.tests.utils import (get_rendered_template,
render_attempted_output)
@pytest.fixture(scope='session')
def output_test_path(pytestconfig):
"""Return absolute path to test outputs directory"""
return os.path.join(pytestconfig.rootdir.strpath, 'tests', 'output')
@pytest.fixture(scope='session')
def rendered_template():
"""
Return callable function to render form template
"""
return get_rendered_template
@pytest.fixture(scope='session')
def render_output():
"""
Return callable function to render output template
"""
return render_attempted_output
@pytest.fixture(scope='function', params=[
"foundation-5",
"foundation-6"
])
def helper(request):
"""
Parametrized fixture to return helper configured for a template pack
"""
helper = FormHelper()
helper.template_pack = request.param
return helper
| 21.914894 | 72 | 0.696117 |
697934777bce20535c7e719c6b4c0540c24b2b8d
| 12,016 |
py
|
Python
|
watcher/tests/api/base.py
|
mail2nsrajesh/watcher
|
5f179609d0ee145fc7957972c83593cce242884d
|
[
"Apache-2.0"
] | null | null | null |
watcher/tests/api/base.py
|
mail2nsrajesh/watcher
|
5f179609d0ee145fc7957972c83593cce242884d
|
[
"Apache-2.0"
] | null | null | null |
watcher/tests/api/base.py
|
mail2nsrajesh/watcher
|
5f179609d0ee145fc7957972c83593cce242884d
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base classes for API tests."""
# NOTE: Ported from ceilometer/tests/api.py (subsequently moved to
# ceilometer/tests/api/__init__.py). This should be oslo'ified:
# https://bugs.launchpad.net/watcher/+bug/1255115.
# NOTE(deva): import auth_token so we can override a config option
import copy
import mock
from oslo_config import cfg
import pecan
import pecan.testing
from six.moves.urllib import parse as urlparse
from watcher.api import hooks
from watcher.common import context as watcher_context
from watcher.notifications import service as n_service
from watcher.tests.db import base
PATH_PREFIX = '/v1'
class FunctionalTest(base.DbTestCase):
"""Pecan controller functional testing class.
Used for functional tests of Pecan controllers where you need to
test your literal application and its integration with the
framework.
"""
SOURCE_DATA = {'test_source': {'somekey': '666'}}
def setUp(self):
super(FunctionalTest, self).setUp()
cfg.CONF.set_override("auth_version", "v2.0",
group='keystone_authtoken')
cfg.CONF.set_override("admin_user", "admin",
group='keystone_authtoken')
p_services = mock.patch.object(n_service, "send_service_update",
new_callable=mock.PropertyMock)
self.m_services = p_services.start()
self.addCleanup(p_services.stop)
self.app = self._make_app()
def reset_pecan():
pecan.set_config({}, overwrite=True)
self.addCleanup(reset_pecan)
def _make_app(self, enable_acl=False):
# Determine where we are so we can set up paths in the config
root_dir = self.get_path()
self.config = {
'app': {
'root': 'watcher.api.controllers.root.RootController',
'modules': ['watcher.api'],
'hooks': [
hooks.ContextHook(),
hooks.NoExceptionTracebackHook()
],
'static_root': '%s/public' % root_dir,
'template_path': '%s/api/templates' % root_dir,
'enable_acl': enable_acl,
'acl_public_routes': ['/', '/v1'],
},
}
return pecan.testing.load_test_app(self.config)
def _request_json(self, path, params, expect_errors=False, headers=None,
method="post", extra_environ=None, status=None,
path_prefix=PATH_PREFIX):
"""Sends simulated HTTP request to Pecan test app.
:param path: url path of target service
:param params: content for wsgi.input of request
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param method: Request method type. Appropriate method function call
should be used rather than passing attribute in.
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
:param path_prefix: prefix of the url path
"""
full_path = path_prefix + path
print('%s: %s %s' % (method.upper(), full_path, params))
response = getattr(self.app, "%s_json" % method)(
str(full_path),
params=params,
headers=headers,
status=status,
extra_environ=extra_environ,
expect_errors=expect_errors
)
print('GOT:%s' % response)
return response
def put_json(self, path, params, expect_errors=False, headers=None,
extra_environ=None, status=None):
"""Sends simulated HTTP PUT request to Pecan test app.
:param path: url path of target service
:param params: content for wsgi.input of request
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
"""
return self._request_json(path=path, params=params,
expect_errors=expect_errors,
headers=headers, extra_environ=extra_environ,
status=status, method="put")
def post_json(self, path, params, expect_errors=False, headers=None,
extra_environ=None, status=None):
"""Sends simulated HTTP POST request to Pecan test app.
:param path: url path of target service
:param params: content for wsgi.input of request
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
"""
return self._request_json(path=path, params=params,
expect_errors=expect_errors,
headers=headers, extra_environ=extra_environ,
status=status, method="post")
def patch_json(self, path, params, expect_errors=False, headers=None,
extra_environ=None, status=None):
"""Sends simulated HTTP PATCH request to Pecan test app.
:param path: url path of target service
:param params: content for wsgi.input of request
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
"""
return self._request_json(path=path, params=params,
expect_errors=expect_errors,
headers=headers, extra_environ=extra_environ,
status=status, method="patch")
def delete(self, path, expect_errors=False, headers=None,
extra_environ=None, status=None, path_prefix=PATH_PREFIX):
"""Sends simulated HTTP DELETE request to Pecan test app.
:param path: url path of target service
:param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param status: expected status code of response
:param path_prefix: prefix of the url path
"""
full_path = path_prefix + path
print('DELETE: %s' % (full_path))
response = self.app.delete(str(full_path),
headers=headers,
status=status,
extra_environ=extra_environ,
expect_errors=expect_errors)
print('GOT:%s' % response)
return response
def get_json(self, path, expect_errors=False, headers=None,
extra_environ=None, q=[], path_prefix=PATH_PREFIX, **params):
"""Sends simulated HTTP GET request to Pecan test app.
:param path: url path of target service
        :param expect_errors: Boolean value; whether an error is expected based
on request
:param headers: a dictionary of headers to send along with the request
:param extra_environ: a dictionary of environ variables to send along
with the request
:param q: list of queries consisting of: field, value, op, and type
keys
:param path_prefix: prefix of the url path
:param params: content for wsgi.input of request
"""
full_path = path_prefix + path
query_params = {'q.field': [],
'q.value': [],
'q.op': [],
}
for query in q:
for name in ['field', 'op', 'value']:
query_params['q.%s' % name].append(query.get(name, ''))
all_params = {}
all_params.update(params)
if q:
all_params.update(query_params)
print('GET: %s %r' % (full_path, all_params))
response = self.app.get(full_path,
params=all_params,
headers=headers,
extra_environ=extra_environ,
expect_errors=expect_errors)
if not expect_errors:
response = response.json
print('GOT:%s' % response)
return response
def validate_link(self, link, bookmark=False):
"""Checks if the given link can get correct data."""
# removes the scheme and net location parts of the link
url_parts = list(urlparse.urlparse(link))
url_parts[0] = url_parts[1] = ''
# bookmark link should not have the version in the URL
if bookmark and url_parts[2].startswith(PATH_PREFIX):
return False
full_path = urlparse.urlunparse(url_parts)
try:
self.get_json(full_path, path_prefix='')
return True
except Exception:
return False
class AdminRoleTest(base.DbTestCase):
def setUp(self):
super(AdminRoleTest, self).setUp()
token_info = {
'token': {
'project': {
'id': 'admin'
},
'user': {
'id': 'admin'
}
}
}
self.context = watcher_context.RequestContext(
auth_token_info=token_info,
project_id='admin',
user_id='admin')
def make_context(*args, **kwargs):
# If context hasn't been constructed with token_info
if not kwargs.get('auth_token_info'):
kwargs['auth_token_info'] = copy.deepcopy(token_info)
if not kwargs.get('project_id'):
kwargs['project_id'] = 'admin'
if not kwargs.get('user_id'):
kwargs['user_id'] = 'admin'
if not kwargs.get('roles'):
kwargs['roles'] = ['admin']
context = watcher_context.RequestContext(*args, **kwargs)
return watcher_context.RequestContext.from_dict(context.to_dict())
p = mock.patch.object(watcher_context, 'make_context',
side_effect=make_context)
self.mock_make_context = p.start()
self.addCleanup(p.stop)
| 41.150685 | 79 | 0.58264 |
21c8e0712145ce1edeaaf4267daa60c90d077e35
| 1,566 |
py
|
Python
|
tests/test_complex.py
|
SkyTruth/gpsdio-vector
|
fe0472d2304bb625a0a926810c014a01d0bc1b59
|
[
"Apache-2.0"
] | null | null | null |
tests/test_complex.py
|
SkyTruth/gpsdio-vector
|
fe0472d2304bb625a0a926810c014a01d0bc1b59
|
[
"Apache-2.0"
] | 2 |
2015-06-12T21:25:40.000Z
|
2015-06-15T20:20:15.000Z
|
tests/test_complex.py
|
SkyTruth/gpsdio-vector
|
fe0472d2304bb625a0a926810c014a01d0bc1b59
|
[
"Apache-2.0"
] | null | null | null |
"""
More complex tests
"""
from click.testing import CliRunner
import fiona as fio
import gpsdio.cli.main
from gpsdio_vector_driver.core import Vector
def test_additional_fields(tmpdir):
p = tmpdir.mkdir('testfiles')
testfile = str(p.join('out.shp'))
result = CliRunner().invoke(gpsdio.cli.main.main_group, [
'--o-drv', 'Vector',
'--o-drv-opt', 'driver=ESRI Shapefile',
'--o-drv-opt', 'fields=new_field:str:40,oth_n_fld:float:25.1',
'etl',
'tests/data/points.json',
testfile
])
assert result.exit_code == 0
with fio.open(testfile) as src:
for f in list(Vector.default_fields) + ['new_field', 'oth_n_fld']:
assert f in src.schema['properties']
assert 'str' in src.schema['properties']['new_field']
assert 'float' in src.schema['properties']['oth_n_fld']
def test_complex_via_api(tmpdir):
# Make sure we can give `fields` as a dict and `driver` via the API.
p = tmpdir.mkdir('testfiles')
testfile = str(p.join('out.geojson'))
do = {
'fields': {
'new_field': 'str'
},
'driver': 'GeoJSON'
}
with gpsdio.open('tests/data/points.json') as src:
with gpsdio.open(testfile, 'w', driver='Vector', do=do) as dst:
for idx, msg in enumerate(src):
dst.write(msg)
num_msgs = idx + 1
with fio.open(testfile) as src:
assert src.driver == 'GeoJSON'
assert 'str' in src.schema['properties']['new_field']
assert num_msgs == len(src)
| 25.672131 | 74 | 0.601533 |
01cf7e14f3b8f525f047842ca8f587466c5c04a2
| 59 |
py
|
Python
|
earlier-2020/python_mod_tutorials/mlutiprocess/rpc.py
|
transcendentsky/py_tutorials
|
fed8e6c8d79f854a1cebcfd5c37297a163846208
|
[
"Apache-2.0"
] | 1 |
2018-06-18T12:09:33.000Z
|
2018-06-18T12:09:33.000Z
|
earlier-2020/python_mod_tutorials/mlutiprocess/rpc.py
|
transcendentsky/py_tutorials
|
fed8e6c8d79f854a1cebcfd5c37297a163846208
|
[
"Apache-2.0"
] | null | null | null |
earlier-2020/python_mod_tutorials/mlutiprocess/rpc.py
|
transcendentsky/py_tutorials
|
fed8e6c8d79f854a1cebcfd5c37297a163846208
|
[
"Apache-2.0"
] | 1 |
2018-06-18T12:13:21.000Z
|
2018-06-18T12:13:21.000Z
|
# -*- coding:utf-8 -*-
# rpc 一般俗称,远程过程调用,把本地的函数,放到远端去调用。
| 11.8 | 33 | 0.610169 |
6bc751457c9fd5b3371759d642d9293d8aa78e3e
| 6,621 |
py
|
Python
|
geocachingapi/geocachingapi.py
|
Sholofly/geocachingapi-python
|
759d4da202975b5adda8a968eed10681f4c3d371
|
[
"MIT"
] | 1 |
2021-04-21T19:24:41.000Z
|
2021-04-21T19:24:41.000Z
|
geocachingapi/geocachingapi.py
|
Sholofly/geocachingapi-python
|
759d4da202975b5adda8a968eed10681f4c3d371
|
[
"MIT"
] | 1 |
2021-06-05T11:03:46.000Z
|
2021-06-05T11:03:46.000Z
|
geocachingapi/geocachingapi.py
|
Sholofly/geocachingapi-python
|
759d4da202975b5adda8a968eed10681f4c3d371
|
[
"MIT"
] | 1 |
2021-06-05T09:34:25.000Z
|
2021-06-05T09:34:25.000Z
|
"""Class for managing one Geocaching API integration."""
from __future__ import annotations
import asyncio
import json
import logging
import socket
import async_timeout
import backoff
from yarl import URL
from aiohttp import ClientResponse, ClientSession, ClientError
from typing import Any, Awaitable, Callable, Dict, List, Optional
from .const import (
GEOCACHING_API_BASE_PATH,
GEOCACHING_API_HOST,
GEOCACHING_API_PORT,
GEOCACHING_API_SCHEME,
GEOCACHING_API_VERSION,
)
from .exceptions import (
GeocachingApiConnectionError,
GeocachingApiConnectionTimeoutError,
GeocachingApiError,
GeocachingApiRateLimitError,
)
from .models import (
GeocachingStatus,
GeocachingSettings
)
_LOGGER = logging.getLogger(__name__)
class GeocachingApi:
""" Main class to control the Geocaching API"""
_close_session: bool = False
_status: GeocachingStatus = None
_settings: GeocachingSettings = None
def __init__(
self,
*,
token: str,
settings: GeocachingSettings = None,
request_timeout: int = 8,
session: Optional[ClientSession] = None,
token_refresh_method: Optional[Callable[[], Awaitable[str]]] = None
) -> None:
"""Initialize connection with the Geocaching API."""
self._status = GeocachingStatus()
self._settings = settings or GeocachingSettings(False)
self._session = session
self.request_timeout = request_timeout
self.token = token
self.token_refresh_method = token_refresh_method
@backoff.on_exception(backoff.expo, GeocachingApiConnectionError, max_tries=3, logger=_LOGGER)
@backoff.on_exception(
backoff.expo, GeocachingApiRateLimitError, base=60, max_tries=6, logger=_LOGGER
)
async def _request(self, method, uri, **kwargs) -> ClientResponse:
"""Make a request."""
if self.token_refresh_method is not None:
self.token = await self.token_refresh_method()
_LOGGER.debug(f'Token refresh method called.')
url = URL.build(
scheme=GEOCACHING_API_SCHEME,
host=GEOCACHING_API_HOST,
port=GEOCACHING_API_PORT,
path=GEOCACHING_API_BASE_PATH,
).join(URL(uri))
_LOGGER.debug(f'Executing {method} API request to {url}.')
headers = kwargs.get("headers")
if headers is None:
headers = {}
else:
headers = dict(headers)
headers["Authorization"] = f"Bearer {self.token}"
_LOGGER.debug(f'With headers:')
_LOGGER.debug(f'{str(headers)}')
if self._session is None:
self._session = ClientSession()
_LOGGER.debug(f'New session created.')
self._close_session = True
try:
with async_timeout.timeout(self.request_timeout):
response = await self._session.request(
method,
f"{url}",
**kwargs,
headers=headers,
)
except asyncio.TimeoutError as exception:
raise GeocachingApiConnectionTimeoutError(
"Timeout occurred while connecting to the Geocaching API"
) from exception
except (ClientError, socket.gaierror) as exception:
raise GeocachingApiConnectionError(
"Error occurred while communicating with the Geocaching API"
) from exception
content_type = response.headers.get("Content-Type", "")
# Error handling
if (response.status // 100) in [4, 5]:
contents = await response.read()
response.close()
if response.status == 429:
raise GeocachingApiRateLimitError(
"Rate limit error has occurred with the Geocaching API"
)
if content_type == "application/json":
raise GeocachingApiError(response.status, json.loads(contents.decode("utf8")))
raise GeocachingApiError(response.status, {"message": contents.decode("utf8")})
# Handle empty response
if response.status == 204:
_LOGGER.warning(f'Request to {url} resulted in status 204. Your dataset could be out of date.')
return
if "application/json" in content_type:
result = await response.json()
_LOGGER.debug(f'Response:')
_LOGGER.debug(f'{str(result)}')
return result
result = await response.text()
_LOGGER.debug(f'Response:')
_LOGGER.debug(f'{str(result)}')
return result
async def update(self) -> GeocachingStatus:
await self._update_user(None)
if self._settings.fetch_trackables:
await self._update_trackables()
_LOGGER.info(f'Status updated.')
return self._status
async def _update_user(self, data: Dict[str, Any] = None) -> None:
assert self._status
if data is None:
fields = ",".join([
"username",
"referenceCode",
"findCount",
"hideCount",
"favoritePoints",
"souvenirCount",
"awardedFavoritePoints",
"membershipLevelId"
])
data = await self._request("GET", f"/{GEOCACHING_API_VERSION}/users/me?fields={fields}")
self._status.update_user_from_dict(data)
_LOGGER.debug(f'User updated.')
async def _update_trackables(self, data: Dict[str, Any] = None) -> None:
assert self._status
if data is None:
fields = ",".join([
"referenceCode",
"name",
"holder",
"trackingNumber",
"kilometersTraveled",
"currentGeocacheCode",
"currentGeocacheName"
])
data = await self._request("GET", f"/{GEOCACHING_API_VERSION}/trackables?fields={fields}&type=3")
self._status.update_trackables_from_dict(data)
_LOGGER.debug(f'Trackables updated.')
async def close(self) -> None:
"""Close open client session."""
if self._session and self._close_session:
await self._session.close()
_LOGGER.debug(f'Session closed.')
async def __aenter__(self) -> GeocachingApi:
"""Async enter."""
return self
async def __aexit__(self, *exc_info) -> None:
"""Async exit."""
await self.close()
| 34.664921 | 109 | 0.599456 |
b192b004899e795b2f14f34fe215be5496650975
| 8,490 |
py
|
Python
|
mythril/leveldb/client.py
|
LoCorVin/mythril
|
3cdc86d4f91dc746632a74c3b662fdcd6cef8033
|
[
"MIT"
] | 1 |
2018-09-07T10:17:35.000Z
|
2018-09-07T10:17:35.000Z
|
mythril/leveldb/client.py
|
LoCorVin/mythril
|
3cdc86d4f91dc746632a74c3b662fdcd6cef8033
|
[
"MIT"
] | null | null | null |
mythril/leveldb/client.py
|
LoCorVin/mythril
|
3cdc86d4f91dc746632a74c3b662fdcd6cef8033
|
[
"MIT"
] | 1 |
2018-06-14T08:36:03.000Z
|
2018-06-14T08:36:03.000Z
|
import binascii
import rlp
from mythril.leveldb.accountindexing import CountableList
from mythril.leveldb.accountindexing import ReceiptForStorage, AccountIndexer
import logging
from ethereum import utils
from ethereum.block import BlockHeader, Block
from mythril.leveldb.state import State
from mythril.leveldb.eth_db import ETH_DB
from mythril.ether.ethcontract import ETHContract
from mythril.exceptions import AddressNotFoundError
# Per https://github.com/ethereum/go-ethereum/blob/master/core/rawdb/schema.go
# prefixes and suffixes for keys in geth
header_prefix = b'h' # header_prefix + num (uint64 big endian) + hash -> header
body_prefix = b'b' # body_prefix + num (uint64 big endian) + hash -> block body
num_suffix = b'n' # header_prefix + num (uint64 big endian) + num_suffix -> hash
block_hash_prefix = b'H' # block_hash_prefix + hash -> num (uint64 big endian)
block_receipts_prefix = b'r' # block_receipts_prefix + num (uint64 big endian) + hash -> block receipts
# known geth keys
head_header_key = b'LastBlock' # head (latest) header hash
# custom prefixes
address_prefix = b'AM' # address_prefix + hash -> address
# custom keys
address_mapping_head_key = b'accountMapping' # head (latest) number of indexed block
def _format_block_number(number):
'''
formats block number to uint64 big endian
'''
return utils.zpad(utils.int_to_big_endian(number), 8)
def _encode_hex(v):
'''
encodes hash as hex
'''
return '0x' + utils.encode_hex(v)
class LevelDBReader(object):
'''
level db reading interface, can be used with snapshot
'''
def __init__(self, db):
self.db = db
self.head_block_header = None
self.head_state = None
def _get_head_state(self):
'''
gets head state
'''
if not self.head_state:
root = self._get_head_block().state_root
self.head_state = State(self.db, root)
return self.head_state
def _get_account(self, address):
'''
gets account by address
'''
state = self._get_head_state()
account_address = binascii.a2b_hex(utils.remove_0x_head(address))
return state.get_and_cache_account(account_address)
def _get_block_hash(self, number):
'''
gets block hash by block number
'''
num = _format_block_number(number)
hash_key = header_prefix + num + num_suffix
return self.db.get(hash_key)
def _get_head_block(self):
'''
gets head block header
'''
if not self.head_block_header:
hash = self.db.get(head_header_key)
num = self._get_block_number(hash)
self.head_block_header = self._get_block_header(hash, num)
# find header with valid state
while not self.db.get(self.head_block_header.state_root) and self.head_block_header.prevhash is not None:
hash = self.head_block_header.prevhash
num = self._get_block_number(hash)
self.head_block_header = self._get_block_header(hash, num)
return self.head_block_header
def _get_block_number(self, hash):
'''
gets block number by hash
'''
number_key = block_hash_prefix + hash
return self.db.get(number_key)
def _get_block_header(self, hash, num):
'''
get block header by block header hash & number
'''
header_key = header_prefix + num + hash
block_header_data = self.db.get(header_key)
header = rlp.decode(block_header_data, sedes=BlockHeader)
return header
def _get_address_by_hash(self, hash):
'''
get mapped address by its hash
'''
address_key = address_prefix + hash
return self.db.get(address_key)
def _get_last_indexed_number(self):
'''
latest indexed block number
'''
return self.db.get(address_mapping_head_key)
def _get_block_receipts(self, hash, num):
'''
get block transaction receipts by block header hash & number
'''
number = _format_block_number(num)
receipts_key = block_receipts_prefix + number + hash
receipts_data = self.db.get(receipts_key)
receipts = rlp.decode(receipts_data, sedes=CountableList(ReceiptForStorage))
return receipts
class LevelDBWriter(object):
'''
level db writing interface
'''
def __init__(self, db):
self.db = db
self.wb = None
def _set_last_indexed_number(self, number):
'''
sets latest indexed block number
'''
return self.db.put(address_mapping_head_key, _format_block_number(number))
def _start_writing(self):
'''
start writing a batch
'''
self.wb = self.db.write_batch()
def _commit_batch(self):
'''
commit batch
'''
self.wb.write()
def _store_account_address(self, address):
'''
get block transaction receipts by block header hash & number
'''
address_key = address_prefix + utils.sha3(address)
self.wb.put(address_key, address)
class EthLevelDB(object):
'''
Go-Ethereum LevelDB client class
'''
def __init__(self, path):
self.path = path
self.db = ETH_DB(path)
self.reader = LevelDBReader(self.db)
self.writer = LevelDBWriter(self.db)
def get_contracts(self):
'''
iterate through all contracts
'''
for account in self.reader._get_head_state().get_all_accounts():
if account.code is not None:
code = _encode_hex(account.code)
contract = ETHContract(code, enable_online_lookup=False)
yield contract, account.address, account.balance
def search(self, expression, callback_func):
'''
searches through all contract accounts
'''
cnt = 0
indexer = AccountIndexer(self)
for contract, address_hash, balance in self.get_contracts():
if contract.matches_expression(expression):
try:
address = _encode_hex(indexer.get_contract_by_hash(address_hash))
except AddressNotFoundError:
'''
The hash->address mapping does not exist in our index. If the index is up-to-date, this likely means
that the contract was created by an internal transaction. Skip this contract as right now we don't
have a good solution for this.
'''
continue
callback_func(contract, address, balance)
cnt += 1
if not cnt % 1000:
logging.info("Searched %d contracts" % cnt)
def contract_hash_to_address(self, hash):
'''
tries to find corresponding account address
'''
address_hash = binascii.a2b_hex(utils.remove_0x_head(hash))
indexer = AccountIndexer(self)
return _encode_hex(indexer.get_contract_by_hash(address_hash))
def eth_getBlockHeaderByNumber(self, number):
'''
gets block header by block number
'''
hash = self.reader._get_block_hash(number)
block_number = _format_block_number(number)
return self.reader._get_block_header(hash, block_number)
def eth_getBlockByNumber(self, number):
'''
gets block body by block number
'''
block_hash = self.reader._get_block_hash(number)
block_number = _format_block_number(number)
body_key = body_prefix + block_number + block_hash
block_data = self.db.get(body_key)
body = rlp.decode(block_data, sedes=Block)
return body
def eth_getCode(self, address):
'''
gets account code
'''
account = self.reader._get_account(address)
return _encode_hex(account.code)
def eth_getBalance(self, address):
'''
gets account balance
'''
account = self.reader._get_account(address)
return account.balance
def eth_getStorageAt(self, address, position):
'''
gets account storage data at position
'''
account = self.reader._get_account(address)
return _encode_hex(utils.zpad(utils.encode_int(account.get_storage_data(position)), 32))
| 31.679104 | 120 | 0.629918 |
793209c33cac5fdd6dfa29d3f9e9fff496d39815
| 897 |
py
|
Python
|
share/qt/clean_mac_info_plist.py
|
zebbra2014/lootcoin
|
a6bf4df4ce0e7df32e13b06ee19706fdf3c276a3
|
[
"MIT"
] | null | null | null |
share/qt/clean_mac_info_plist.py
|
zebbra2014/lootcoin
|
a6bf4df4ce0e7df32e13b06ee19706fdf3c276a3
|
[
"MIT"
] | null | null | null |
share/qt/clean_mac_info_plist.py
|
zebbra2014/lootcoin
|
a6bf4df4ce0e7df32e13b06ee19706fdf3c276a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Jonas Schnelli, 2013
# make sure the LootCoin-Qt.app contains the right plist (including the right version)
# fix made because of several bugs in Qt mac deployment (https://bugreports.qt-project.org/browse/QTBUG-21267)
from string import Template
from datetime import date
bitcoinDir = "./";
inFile = bitcoinDir+"/share/qt/Info.plist"
outFile = "LootCoin-Qt.app/Contents/Info.plist"
version = "unknown";
fileForGrabbingVersion = bitcoinDir+"bitcoin-qt.pro"
for line in open(fileForGrabbingVersion):
lineArr = line.replace(" ", "").split("=");
if lineArr[0].startswith("VERSION"):
version = lineArr[1].replace("\n", "");
fIn = open(inFile, "r")
fileContent = fIn.read()
s = Template(fileContent)
newFileContent = s.substitute(VERSION=version,YEAR=date.today().year)
fOut = open(outFile, "w");
fOut.write(newFileContent);
print "Info.plist fresh created"
| 29.9 | 109 | 0.725753 |
2a3c585734cb1118f529718f1ddef64d25f52832
| 5,246 |
py
|
Python
|
neutron/services/l3_router/l3_router_plugin.py
|
brandonlogan/neutron
|
57364544aa8b0e7cd9d73550f287bcad574ba08c
|
[
"Apache-2.0"
] | 1 |
2017-09-10T09:57:35.000Z
|
2017-09-10T09:57:35.000Z
|
neutron/services/l3_router/l3_router_plugin.py
|
brandonlogan/neutron
|
57364544aa8b0e7cd9d73550f287bcad574ba08c
|
[
"Apache-2.0"
] | null | null | null |
neutron/services/l3_router/l3_router_plugin.py
|
brandonlogan/neutron
|
57364544aa8b0e7cd9d73550f287bcad574ba08c
|
[
"Apache-2.0"
] | 1 |
2015-05-05T14:41:11.000Z
|
2015-05-05T14:41:11.000Z
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_utils import importutils
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.db import common_db_mixin
from neutron.db import dns_db
from neutron.db import extraroute_db
from neutron.db import l3_dvr_ha_scheduler_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_gwmode_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.db.models import l3 as l3_models
from neutron.extensions import l3
from neutron.plugins.common import constants
from neutron.quota import resource_registry
from neutron import service
from neutron.services.l3_router.service_providers import driver_controller
from neutron.services import service_base
class L3RouterPlugin(service_base.ServicePluginBase,
common_db_mixin.CommonDbMixin,
extraroute_db.ExtraRoute_db_mixin,
l3_hamode_db.L3_HA_NAT_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
l3_dvr_ha_scheduler_db.L3_DVR_HA_scheduler_db_mixin,
dns_db.DNSDbMixin):
"""Implementation of the Neutron L3 Router Service Plugin.
    This class implements an L3 service plugin that provides
router and floatingip resources and manages associated
request/response.
All DB related work is implemented in classes
l3_db.L3_NAT_db_mixin, l3_hamode_db.L3_HA_NAT_db_mixin,
l3_dvr_db.L3_NAT_with_dvr_db_mixin, and extraroute_db.ExtraRoute_db_mixin.
"""
supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
"extraroute", "l3_agent_scheduler",
"l3-ha", "router_availability_zone",
"l3-flavors"]
__native_pagination_support = True
__native_sorting_support = True
@resource_registry.tracked_resources(router=l3_models.Router,
floatingip=l3_models.FloatingIP)
def __init__(self):
self.router_scheduler = importutils.import_object(
cfg.CONF.router_scheduler_driver)
self.add_periodic_l3_agent_status_check()
super(L3RouterPlugin, self).__init__()
if 'dvr' in self.supported_extension_aliases:
l3_dvrscheduler_db.subscribe()
if 'l3-ha' in self.supported_extension_aliases:
l3_hascheduler_db.subscribe()
self.agent_notifiers.update(
{n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
rpc_worker = service.RpcWorker([self], worker_process_count=0)
self.add_worker(rpc_worker)
self.l3_driver_controller = driver_controller.DriverController(self)
@log_helpers.log_method_call
def start_rpc_listeners(self):
# RPC support
self.topic = topics.L3PLUGIN
self.conn = n_rpc.create_connection()
self.endpoints = [l3_rpc.L3RpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
return self.conn.consume_in_threads()
@classmethod
def get_plugin_type(cls):
return constants.L3_ROUTER_NAT
def get_plugin_description(self):
"""returns string description of the plugin."""
return ("L3 Router Service Plugin for basic L3 forwarding"
" between (L2) Neutron networks and access to external"
" networks via a NAT gateway.")
def router_supports_scheduling(self, context, router_id):
return self.l3_driver_controller.uses_scheduler(context, router_id)
def create_floatingip(self, context, floatingip):
"""Create floating IP.
:param context: Neutron request context
:param floatingip: data for the floating IP being created
:returns: A floating IP object on success
As the l3 router plugin asynchronously creates floating IPs
leveraging the l3 agent, the initial status for the floating
IP object will be DOWN.
"""
return super(L3RouterPlugin, self).create_floatingip(
context, floatingip,
initial_status=n_const.FLOATINGIP_STATUS_DOWN)
def add_flavor_id(plugin, router_res, router_db):
router_res['flavor_id'] = router_db['flavor_id']
common_db_mixin.CommonDbMixin.register_dict_extend_funcs(
l3.ROUTERS, [add_flavor_id])
| 40.045802 | 78 | 0.70873 |
3dd5c2f247549877d77d66260769542b58f56fd9
| 22,022 |
py
|
Python
|
tortoise/models.py
|
ITgladiator/tortoise-orm
|
9a2bd0edd078ae12e5837c22f88c19f8cc84e7d7
|
[
"Apache-2.0"
] | null | null | null |
tortoise/models.py
|
ITgladiator/tortoise-orm
|
9a2bd0edd078ae12e5837c22f88c19f8cc84e7d7
|
[
"Apache-2.0"
] | null | null | null |
tortoise/models.py
|
ITgladiator/tortoise-orm
|
9a2bd0edd078ae12e5837c22f88c19f8cc84e7d7
|
[
"Apache-2.0"
] | null | null | null |
from copy import copy, deepcopy
from typing import Any, Dict, List, Optional, Set, Tuple, Type, TypeVar, Union
from pypika import Query
from tortoise import fields
from tortoise.backends.base.client import BaseDBAsyncClient # noqa
from tortoise.exceptions import ConfigurationError, OperationalError
from tortoise.fields import (
Field,
ManyToManyField,
ManyToManyRelationManager,
RelationQueryContainer,
)
from tortoise.filters import get_filters_for_field
from tortoise.queryset import QuerySet
from tortoise.transactions import current_transaction_map
MODEL_TYPE = TypeVar("MODEL_TYPE", bound="Model")
# TODO: Define Filter type object. Possibly tuple?
def get_unique_together(meta) -> Tuple[Tuple[str, ...], ...]:
unique_together = getattr(meta, "unique_together", None)
if isinstance(unique_together, (list, tuple)):
if unique_together and isinstance(unique_together[0], str):
unique_together = (unique_together,)
# return without validation, validation will be done further in the code
return unique_together
class MetaInfo:
__slots__ = (
"abstract",
"table",
"app",
"_fields",
"_db_fields",
"m2m_fields",
"fk_fields",
"backward_fk_fields",
"_fetch_fields",
"fields_db_projection",
"_inited",
"_fields_db_projection_reverse",
"filters",
"fields_map",
"default_connection",
"basequery",
"basequery_all_fields",
"_filters",
"unique_together",
"pk_attr",
"_generated_db_fields",
"_model",
)
def __init__(self, meta) -> None:
self.abstract = getattr(meta, "abstract", False) # type: bool
self.table = getattr(meta, "table", "") # type: str
self.app = getattr(meta, "app", None) # type: Optional[str]
self.unique_together = get_unique_together(meta) # type: Optional[Union[Tuple, List]]
self._fields = None # type: Optional[Set[str]]
self._db_fields = None # type: Optional[Set[str]]
self.m2m_fields = set() # type: Set[str]
self.fk_fields = set() # type: Set[str]
self.backward_fk_fields = set() # type: Set[str]
self._fetch_fields = None # type: Optional[Set[str]]
self.fields_db_projection = {} # type: Dict[str,str]
self._fields_db_projection_reverse = None # type: Optional[Dict[str,str]]
self._filters = {} # type: Dict[str, Dict[str, dict]]
self.filters = {} # type: Dict[str, dict]
self.fields_map = {} # type: Dict[str, fields.Field]
self._inited = False # type: bool
self.default_connection = None # type: Optional[str]
self.basequery = Query() # type: Query
self.basequery_all_fields = Query() # type: Query
self.pk_attr = getattr(meta, "pk_attr", "") # type: str
self._generated_db_fields = None # type: Optional[Tuple[str]]
self._model = None # type: "Model" # type: ignore
def add_field(self, name: str, value: Field):
if name in self.fields_map:
raise ConfigurationError("Field {} already present in meta".format(name))
setattr(self._model, name, value)
value.model = self._model
self.fields_map[name] = value
self._fields = None
if value.has_db_field:
self.fields_db_projection[name] = value.source_field or name
self._fields_db_projection_reverse = None
if isinstance(value, fields.ManyToManyField):
self.m2m_fields.add(name)
self._fetch_fields = None
elif isinstance(value, fields.BackwardFKRelation):
self.backward_fk_fields.add(name)
self._fetch_fields = None
field_filters = get_filters_for_field(
field_name=name, field=value, source_field=value.source_field or name
)
self._filters.update(field_filters)
self.generate_filters()
@property
def fields_db_projection_reverse(self) -> Dict[str, str]:
if self._fields_db_projection_reverse is None:
self._fields_db_projection_reverse = {
value: key for key, value in self.fields_db_projection.items()
}
return self._fields_db_projection_reverse
@property
def fields(self) -> Set[str]:
if self._fields is None:
self._fields = set(self.fields_map.keys())
return self._fields
@property
def db_fields(self) -> Set[str]:
if self._db_fields is None:
self._db_fields = set(self.fields_db_projection.values())
return self._db_fields
@property
def fetch_fields(self):
if self._fetch_fields is None:
self._fetch_fields = self.m2m_fields | self.backward_fk_fields | self.fk_fields
return self._fetch_fields
@property
def pk(self):
return self.fields_map[self.pk_attr]
@property
def db_pk_field(self) -> str:
field_object = self.fields_map[self.pk_attr]
return field_object.source_field or self.pk_attr
@property
def is_pk_generated(self) -> bool:
field_object = self.fields_map[self.pk_attr]
return field_object.generated
@property
def generated_db_fields(self) -> Tuple[str]:
"""Return list of names of db fields that are generated on db side"""
if self._generated_db_fields is None:
generated_fields = []
for field in self.fields_map.values():
if not field.generated:
continue
generated_fields.append(field.source_field or field.model_field_name)
self._generated_db_fields = tuple(generated_fields) # type: ignore
return self._generated_db_fields # type: ignore
@property
def db(self) -> BaseDBAsyncClient:
try:
return current_transaction_map[self.default_connection].get()
except KeyError:
raise ConfigurationError("No DB associated to model")
def get_filter(self, key: str) -> dict:
return self.filters[key]
def generate_filters(self) -> None:
get_overridden_filter_func = self.db.executor_class.get_overridden_filter_func
for key, filter_info in self._filters.items():
overridden_operator = get_overridden_filter_func( # type: ignore
filter_func=filter_info["operator"]
)
if overridden_operator:
filter_info = copy(filter_info)
filter_info["operator"] = overridden_operator # type: ignore
self.filters[key] = filter_info
class ModelMeta(type):
__slots__ = ()
def __new__(mcs, name: str, bases, attrs: dict, *args, **kwargs):
fields_db_projection = {} # type: Dict[str,str]
fields_map = {} # type: Dict[str, fields.Field]
filters = {} # type: Dict[str, Dict[str, dict]]
fk_fields = set() # type: Set[str]
m2m_fields = set() # type: Set[str]
meta_class = attrs.get("Meta", type("Meta", (), {}))
pk_attr = "id"
        # ---------- Custom: inherit field definitions from parent classes -------------
for base in bases:
if hasattr(base, '_meta') and hasattr(base._meta, 'fields_map'):
attrs.update(base._meta.fields_map)
# ----------- end -----------------
if name != "Model":
custom_pk_present = False
for key, value in attrs.items():
if isinstance(value, fields.Field):
if value.pk:
if custom_pk_present:
raise ConfigurationError(
"Can't create model {} with two primary keys, "
"only single pk are supported".format(name)
)
elif value.generated and not isinstance(
value, (fields.IntField, fields.BigIntField)
):
raise ConfigurationError(
"Generated primary key allowed only for IntField and BigIntField"
)
custom_pk_present = True
pk_attr = key
if not custom_pk_present:
if "id" not in attrs:
attrs["id"] = fields.IntField(pk=True)
if not isinstance(attrs["id"], fields.Field) or not attrs["id"].pk:
raise ConfigurationError(
"Can't create model {} without explicit primary key "
"if field 'id' already present".format(name)
)
for key, value in attrs.items():
if isinstance(value, fields.Field):
if getattr(meta_class, "abstract", None):
value = deepcopy(value)
fields_map[key] = value
value.model_field_name = key
if isinstance(value, fields.ForeignKeyField):
fk_fields.add(key)
elif isinstance(value, fields.ManyToManyField):
m2m_fields.add(key)
else:
fields_db_projection[key] = value.source_field or key
filters.update(
get_filters_for_field(
field_name=key,
field=fields_map[key],
source_field=fields_db_projection[key],
)
)
if value.pk:
filters.update(
get_filters_for_field(
field_name="pk",
field=fields_map[key],
source_field=fields_db_projection[key],
)
)
attrs["_meta"] = meta = MetaInfo(meta_class)
meta.fields_map = fields_map
meta.fields_db_projection = fields_db_projection
meta._filters = filters
meta.fk_fields = fk_fields
meta.backward_fk_fields = set()
meta.m2m_fields = m2m_fields
meta.default_connection = None
meta.pk_attr = pk_attr
meta._inited = False
if not fields_map:
meta.abstract = True
new_class = super().__new__(mcs, name, bases, attrs) # type: "Model" # type: ignore
for field in meta.fields_map.values():
field.model = new_class
meta._model = new_class
return new_class
class Model(metaclass=ModelMeta):
    # I don't like this here, but it makes autocompletion and static analysis much happier
_meta = MetaInfo(None)
def __init__(self, *args, _from_db: bool = False, **kwargs) -> None:
# self._meta is a very common attribute lookup, lets cache it.
meta = self._meta
self._saved_in_db = _from_db or (meta.pk_attr in kwargs and meta.is_pk_generated)
# Create lazy fk/m2m objects
for key in meta.backward_fk_fields:
field_object = meta.fields_map[key]
setattr(
self,
key,
RelationQueryContainer(
field_object.type, field_object.relation_field, self # type: ignore
),
)
for key in meta.m2m_fields:
field_object = meta.fields_map[key]
setattr(
self,
key,
ManyToManyRelationManager( # type: ignore
field_object.type, self, field_object
),
)
# Assign values and do type conversions
passed_fields = set(kwargs.keys())
passed_fields.update(meta.fetch_fields)
passed_fields |= self._set_field_values(kwargs)
# Assign defaults for missing fields
for key in meta.fields.difference(passed_fields):
field_object = meta.fields_map[key]
if callable(field_object.default):
setattr(self, key, field_object.default())
else:
setattr(self, key, field_object.default)
def _set_field_values(self, values_map: Dict[str, Any]) -> Set[str]:
"""
        Sets values for fields, honoring type transformations, and
        returns the list of fields that were set additionally
"""
meta = self._meta
passed_fields = set()
for key, value in values_map.items():
if key in meta.fk_fields:
if hasattr(value, "pk") and not value.pk:
raise OperationalError(
"You should first call .save() on {} before referring to it".format(value)
)
relation_field = "{}_id".format(key)
setattr(self, relation_field, value.pk)
passed_fields.add(relation_field)
elif key in meta.fields:
field_object = meta.fields_map[key]
if value is None and not field_object.null:
raise ValueError("{} is non nullable field, but null was passed".format(key))
setattr(self, key, field_object.to_python_value(value))
elif key in meta.db_fields:
field_object = meta.fields_map[meta.fields_db_projection_reverse[key]]
if value is None and not field_object.null:
raise ValueError("{} is non nullable field, but null was passed".format(key))
setattr(self, key, field_object.to_python_value(value))
elif key in meta.backward_fk_fields:
raise ConfigurationError(
"You can't set backward relations through init, change related model instead"
)
elif key in meta.m2m_fields:
raise ConfigurationError(
"You can't set m2m relations through init, use m2m_manager instead"
)
return passed_fields
def __str__(self) -> str:
return "<{}>".format(self.__class__.__name__)
def __repr__(self) -> str:
if self.pk:
return "<{}: {}>".format(self.__class__.__name__, self.pk)
return "<{}>".format(self.__class__.__name__)
def __hash__(self) -> int:
if not self.pk:
raise TypeError("Model instances without id are unhashable")
return hash(self.pk)
def __eq__(self, other) -> bool:
# pylint: disable=C0123
if type(self) == type(other) and self.pk == other.pk:
return True
return False
def _get_pk_val(self):
return getattr(self, self._meta.pk_attr)
def _set_pk_val(self, value):
setattr(self, self._meta.pk_attr, value)
pk = property(_get_pk_val, _set_pk_val)
"""
Alias to the models Primary Key.
Can be used as a field name when doing filtering e.g. ``.filter(pk=...)`` etc...
"""
async def save(self, using_db=None) -> None:
"""
Creates/Updates the current model object.
"""
db = using_db or self._meta.db
executor = db.executor_class(model=self.__class__, db=db)
if self._saved_in_db:
await executor.execute_update(self)
else:
await executor.execute_insert(self)
self._saved_in_db = True
async def delete(self, using_db=None) -> None:
"""
Deletes the current model object.
:raises OperationalError: If object has never been persisted.
"""
db = using_db or self._meta.db
if not self._saved_in_db:
raise OperationalError("Can't delete unpersisted record")
await db.executor_class(model=self.__class__, db=db).execute_delete(self)
async def fetch_related(self, *args, using_db=None) -> None:
"""
Fetch related fields.
.. code-block:: python3
User.fetch_related("emails", "manager")
:param args: The related fields that should be fetched.
"""
db = using_db or self._meta.db
await db.executor_class(model=self.__class__, db=db).fetch_for_list([self], *args)
@classmethod
async def get_or_create(
cls: Type[MODEL_TYPE], using_db=None, defaults=None, **kwargs
) -> Tuple[MODEL_TYPE, bool]:
"""
Fetches the object if exists (filtering on the provided parameters),
else creates an instance with any unspecified parameters as default values.
"""
if not defaults:
defaults = {}
instance = await cls.filter(**kwargs).first()
if instance:
return instance, False
return await cls.create(**defaults, **kwargs, using_db=using_db), True
@classmethod
async def create(cls: Type[MODEL_TYPE], **kwargs) -> MODEL_TYPE:
"""
Create a record in the DB and returns the object.
.. code-block:: python3
user = await User.create(name="...", email="...")
Equivalent to:
.. code-block:: python3
user = User(name="...", email="...")
await user.save()
"""
instance = cls(**kwargs)
db = kwargs.get("using_db") or cls._meta.db
await db.executor_class(model=cls, db=db).execute_insert(instance)
instance._saved_in_db = True
return instance
@classmethod
async def bulk_create(cls: Type[MODEL_TYPE], objects: List[MODEL_TYPE], using_db=None) -> None:
"""
Bulk insert operation:
.. note::
The bulk insert operation will do the minimum to ensure that the object
created in the DB has all the defaults and generated fields set,
            but may be an incomplete reference in Python.
            e.g. ``IntField`` primary keys will not be populated.
        This is recommended only for throw-away inserts where you want to ensure optimal
insert performance.
.. code-block:: python3
User.bulk_create([
User(name="...", email="..."),
User(name="...", email="...")
])
:param objects: List of objects to bulk create
"""
db = using_db or cls._meta.db
await db.executor_class(model=cls, db=db).execute_bulk_insert(objects)
@classmethod
def first(cls) -> QuerySet:
"""
Generates a QuerySet that returns the first record.
"""
return QuerySet(cls).first()
@classmethod
def filter(cls, *args, **kwargs) -> QuerySet:
"""
Generates a QuerySet with the filter applied.
"""
return QuerySet(cls).filter(*args, **kwargs)
@classmethod
def exclude(cls, *args, **kwargs) -> QuerySet:
"""
Generates a QuerySet with the exclude applied.
"""
return QuerySet(cls).exclude(*args, **kwargs)
@classmethod
def annotate(cls, **kwargs) -> QuerySet:
return QuerySet(cls).annotate(**kwargs)
@classmethod
def all(cls) -> QuerySet:
"""
Returns the complete QuerySet.
"""
return QuerySet(cls)
@classmethod
def get(cls, *args, **kwargs) -> QuerySet:
"""
Fetches a single record for a Model type using the provided filter parameters.
.. code-block:: python3
user = await User.get(username="foo")
:raises MultipleObjectsReturned: If provided search returned more than one object.
:raises DoesNotExist: If object can not be found.
"""
return QuerySet(cls).get(*args, **kwargs)
@classmethod
async def fetch_for_list(cls, instance_list, *args, using_db=None):
db = using_db or cls._meta.db
await db.executor_class(model=cls, db=db).fetch_for_list(instance_list, *args)
@classmethod
def check(cls) -> None:
"""
Calls various checks to validate the model.
:raises ConfigurationError: If the model has not been configured correctly.
"""
cls._check_unique_together()
@classmethod
def _check_unique_together(cls) -> None:
"""Check the value of "unique_together" option."""
if cls._meta.unique_together is None:
return
if not isinstance(cls._meta.unique_together, (tuple, list)):
raise ConfigurationError(
"'{}.unique_together' must be a list or tuple.".format(cls.__name__)
)
elif any(
not isinstance(unique_fields, (tuple, list))
for unique_fields in cls._meta.unique_together
):
raise ConfigurationError(
"All '{}.unique_together' elements must be lists or tuples.".format(cls.__name__)
)
else:
for fields_tuple in cls._meta.unique_together:
for field_name in fields_tuple:
field = cls._meta.fields_map.get(field_name)
if not field:
raise ConfigurationError(
"'{}.unique_together' has no '{}' "
"field.".format(cls.__name__, field_name)
)
if isinstance(field, ManyToManyField):
raise ConfigurationError(
"'{}.unique_together' '{}' field refers "
"to ManyToMany field.".format(cls.__name__, field_name)
)
class Meta:
"""
        The ``Meta`` class is used to configure metadata for the Model.
Usage:
.. code-block:: python3
class Foo(Model):
...
class Meta:
table="custom_table"
unique_together=(("field_a", "field_b"), )
"""
pass
| 36.280066 | 99 | 0.572382 |
27f7f096092aefcd4db539903c33b89718e74563
| 1,996 |
py
|
Python
|
sscanss/core/math/misc.py
|
jfkcooper/SScanSS-2
|
ae50c1d065732a7742eaf1a7b9a9349907c29f8a
|
[
"BSD-3-Clause"
] | null | null | null |
sscanss/core/math/misc.py
|
jfkcooper/SScanSS-2
|
ae50c1d065732a7742eaf1a7b9a9349907c29f8a
|
[
"BSD-3-Clause"
] | null | null | null |
sscanss/core/math/misc.py
|
jfkcooper/SScanSS-2
|
ae50c1d065732a7742eaf1a7b9a9349907c29f8a
|
[
"BSD-3-Clause"
] | null | null | null |
"""
A collection of miscellaneous functions
"""
import math
import numpy as np
from .constants import VECTOR_EPS
def clamp(value, min_value=0.0, max_value=1.0):
"""Clamps a value between a minimum and maximum value.
Similar to ``numpy.clip`` but is faster for non-array
:param value: number to clamp
:type value: float
    :param min_value: minimum value
    :type min_value: float
    :param max_value: maximum value
    :type max_value: float
:return: number clamped between the specified range
:rtype: float
"""
return max(min(value, max_value), min_value)
def map_range(old_min, old_max, new_min, new_max, value):
    Maps a given value from the initial (first) range to another (second) range.
:param old_min: minimum of first range
:type old_min: float
:param old_max: maximum of first range
:type old_max: float
:param new_min: minimum of second range
:type new_min: float
:param new_max: maximum of second range
:type new_max: float
:param value: real number to remap
:type value: float
:return: remapped value
:rtype: float
"""
return new_min + ((value - old_min) * (new_max - new_min) / (old_max - old_min))
def trunc(value, decimals=0):
"""Truncates values after a number of decimal points
:param value: number to truncate
:type value: float
:param decimals: number of decimals points to keep
:type decimals: int
:return: truncated float
:rtype: float
"""
step = 10**decimals
return math.trunc(value * step) / step
def is_close(a, b, tol=VECTOR_EPS):
"""Checks that two values are close by comparing absolute difference with tolerance
:param a: first value
:type a: array_like
:param b: second value
:type b: array_like
:param tol: tolerance
:type tol: float
:return: indicates if values are close
:rtype: bool
"""
if np.all(np.abs(np.subtract(a, b)) < tol):
return True
return False
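# --- Editor's illustrative sketch (not part of the original module) ----------
# Minimal, hand-checked usage of the helpers above; the literal values are
# assumptions chosen only to make the behaviour concrete.
if __name__ == "__main__":  # pragma: no cover - illustration only
    print(clamp(1.5))                           # -> 1.0, clipped to the default [0.0, 1.0] range
    print(map_range(0.0, 10.0, 0.0, 1.0, 2.5))  # -> 0.25, 2.5 sits a quarter of the way through [0, 10]
    print(trunc(3.14159, 2))                    # -> 3.14, digits past 2 decimals are dropped (no rounding)
    print(is_close(1.0, 1.0))                   # -> True, zero difference is below any positive tolerance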
| 27.342466 | 87 | 0.673347 |
d11f8be96301f2df52cf1eb085feb90e39e14161
| 1,002 |
py
|
Python
|
api/kubeops_api/models/credential.py
|
liqiang-fit2cloud/KubeOperator
|
cb9346b95d29919570cefa6bea1ce4e5c3f0ee6d
|
[
"Apache-2.0"
] | 3 |
2019-11-29T03:49:08.000Z
|
2020-07-29T02:52:51.000Z
|
api/kubeops_api/models/credential.py
|
liqiang-fit2cloud/KubeOperator
|
cb9346b95d29919570cefa6bea1ce4e5c3f0ee6d
|
[
"Apache-2.0"
] | 1 |
2019-09-09T07:27:51.000Z
|
2019-09-09T07:27:51.000Z
|
api/kubeops_api/models/credential.py
|
liqiang-fit2cloud/KubeOperator
|
cb9346b95d29919570cefa6bea1ce4e5c3f0ee6d
|
[
"Apache-2.0"
] | 1 |
2020-03-04T00:29:29.000Z
|
2020-03-04T00:29:29.000Z
|
import uuid
from django.db import models
from django.utils.translation import ugettext_lazy as _
from common import models as common_models
__all__ = ["Credential"]
class Credential(models.Model):
CREDENTIAL_TYPE_PASSWORD = "password"
CREDENTIAL_TYPE_PRIVATE_KEY = "privateKey"
CREDENTIAL_TYPE_CHOICES = (
(CREDENTIAL_TYPE_PASSWORD, "password"),
(CREDENTIAL_TYPE_PRIVATE_KEY, "privateKey")
)
id = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.SlugField(max_length=128, allow_unicode=True, unique=True, verbose_name=_('Name'))
username = models.CharField(max_length=256, default='root')
password = common_models.EncryptCharField(max_length=4096, blank=True, null=True)
private_key = common_models.EncryptCharField(max_length=8192, blank=True, null=True)
type = models.CharField(max_length=128, choices=CREDENTIAL_TYPE_CHOICES, default=CREDENTIAL_TYPE_PASSWORD)
date_created = models.DateTimeField(auto_now_add=True)
| 41.75 | 110 | 0.771457 |
741c8f4c5fd6199dbc7495862bdc07f926b62bf6
| 2,699 |
py
|
Python
|
src/testServer2.py
|
cyberjacob/caveman
|
4ee3ff557a6325d305f3ce44822042787f648e3b
|
[
"MIT"
] | null | null | null |
src/testServer2.py
|
cyberjacob/caveman
|
4ee3ff557a6325d305f3ce44822042787f648e3b
|
[
"MIT"
] | null | null | null |
src/testServer2.py
|
cyberjacob/caveman
|
4ee3ff557a6325d305f3ce44822042787f648e3b
|
[
"MIT"
] | null | null | null |
import socket
import threading
def start():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # listen for up to 50 connections on port 1337
sock.bind(('', 1337))
sock.listen(50)
print '[L] Socket Opened, listening for connections'
while True:
csock,caddr = sock.accept()
print '[N] new connection from '+caddr[0]+':'+str(caddr[1])
# Start a thread to service each cnxn
t = threading.Thread(target=handle_cnxn, args=(csock,caddr,))
t.start()
def handle_cnxn(csock, caddr):
shake1 = csock.recv(1024)
print caddr[0]+'-> '+shake1
shakelist = shake1.split("\r\n")
print 'Got handshake'
# Extract key1 and key2
for elem in shakelist:
if elem.startswith("Sec-WebSocket-Key:"):
            client64Key = elem[19:]  # the "Sec-WebSocket-Key: " prefix is 19 chars
#print 'got key '+client64Key
elif elem.startswith("Origin:"):
ws_origin = elem[8:]
elif elem.startswith("Host:"):
ws_host = elem[6:]
elif elem.startswith("GET "):
ws_path = elem[4:-9]
else:
continue
    # Concatenate the client key with the WebSocket GUID and take the SHA-1 hash of it
print 'got client key '+client64Key
key = client64Key + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
print 'got joint key '+key
import hashlib, base64
m = hashlib.sha1()
m.update(key)
d = base64.b64encode(m.digest())
print 'got base64 hash '+d
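    # Editor's illustrative note (not part of the original script): d is the
    # RFC 6455 Sec-WebSocket-Accept value.  With the sample key from the RFC,
    # the same computation gives:
    #
    #     base64.b64encode(hashlib.sha1(
    #         "dGhlIHNhbXBsZSBub25jZQ==" + "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
    #     ).digest()) == "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="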
# Send 'headers'
# Modified to automatically adhere to the Same-Origin Policy.
# DO NOT USE IN PRODUCTION CODE!!!
csock.send("HTTP/1.1 101 WebSocket Protocol Handshake\r\n")
csock.send("Upgrade: WebSocket\r\n")
csock.send("Connection: Upgrade\r\n")
csock.send("Sec-WebSocket-Accept: " + d + "\r\n")
csock.send("Sec-WebSocket-Origin: " + ws_origin + "\r\n")
csock.send("Sec-WebSocket-Location: ws://" + ws_host + ws_path + "\r\n")
#csock.send("Sec-WebSocket-Protocol: chat\r\n")
csock.send("\r\n")
#Send digest
csock.send(d)
# Message framing - 0x00 utf-8-encoded-body 0xFF
def send(data):
first_byte = chr(0x00)
payload = data.encode('utf-8')
pl = first_byte + payload + chr(0xFF)
csock.send(pl)
from time import sleep
# This is dependent on you - what you wish to send to the browser
i = 0
while True:
send(str(i))
print caddr[0]+'-> '+str(i)
shake1 = csock.recv(1024)
print caddr[0]+'<- '+shake1
i += 1
sleep(1)
if __name__ == "__main__":
start()
| 29.988889 | 92 | 0.601704 |
600b3789d3e479a43cbfd1ea40eb0e340aefacc2
| 7,792 |
py
|
Python
|
trestle/core/validator_helper.py
|
PritamDutt/compliance-trestle
|
7edadde2bd2949e73a085bd78ef57995250fc9cb
|
[
"Apache-2.0"
] | null | null | null |
trestle/core/validator_helper.py
|
PritamDutt/compliance-trestle
|
7edadde2bd2949e73a085bd78ef57995250fc9cb
|
[
"Apache-2.0"
] | null | null | null |
trestle/core/validator_helper.py
|
PritamDutt/compliance-trestle
|
7edadde2bd2949e73a085bd78ef57995250fc9cb
|
[
"Apache-2.0"
] | null | null | null |
# -*- mode:python; coding:utf-8 -*-
# Copyright (c) 2020 IBM Corp. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for dealing with models."""
import logging
import uuid
from typing import Any, Dict, List, Tuple, TypeVar
import pydantic
from trestle.oscal import common
logger = logging.getLogger(__name__)
# Generic type var
TG = TypeVar('TG')
def find_values_by_name(object_of_interest: Any, name_of_interest: str) -> List[Any]:
"""Traverse object and return list of values of specified name."""
loe = []
if isinstance(object_of_interest, pydantic.BaseModel):
value = getattr(object_of_interest, name_of_interest, None)
if value is not None:
loe.append(value)
fields = getattr(object_of_interest, '__fields_set__', None)
if fields is not None:
for field in fields:
loe.extend(find_values_by_name(getattr(object_of_interest, field, None), name_of_interest))
elif type(object_of_interest) is list:
for item in object_of_interest:
loe.extend(find_values_by_name(item, name_of_interest))
elif type(object_of_interest) is dict:
if name_of_interest in object_of_interest:
loe.append(object_of_interest[name_of_interest])
for item in object_of_interest.values():
loe.extend(find_values_by_name(item, name_of_interest))
return loe
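# Editor's illustrative note (not part of the original module): a hedged usage
# sketch of the traversal above.  ``some_catalog`` is a hypothetical OSCAL
# model instance; any attribute or dict key literally named 'uuid' anywhere
# inside it would be collected into the returned flat list, e.g.:
#
#     uuids = find_values_by_name(some_catalog, 'uuid')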
def has_no_duplicate_values_by_name(object_of_interest: Any, name_of_interest: str) -> bool:
"""Determine if duplicate values of type exist in object."""
loe = find_values_by_name(object_of_interest, name_of_interest)
set_loe = set(loe)
if len(loe) == len(set_loe):
return True
items = {}
for item in loe:
items[item] = items.get(item, 0) + 1
# now print items
for item, instances in items.items():
if instances > 1:
logger.info(f'Duplicate detected of item {item} with {instances} instances.')
return False
def _regenerate_uuids_in_place(object_of_interest: Any, uuid_lut: Dict[str, str]) -> Tuple[Any, Dict[str, str]]:
"""Update all uuids in model that require updating.
Go through the model and replace all dicts with key == 'uuid' and replace the value with a new uuid4.
Build a lookup table of the updates that were made.
    This function does not update the corresponding refs to those uuid's. That is done by _update_new_uuid_refs.
Note that this function needs to be started off with uuid_lut == {}, i.e. an empty dict.
After that it recurses and grows the lut.
Args:
object_of_interest: pydantic.BaseModel, list, dict or str will be updated
uuid_lut: dict of the growing lut of old:new uuid's. First call must be made with value {}
Returns:
The updated object_of_interest with new uuid's (but refs to them are not updated)
The final lookup table of old:new uuid's
"""
uuid_str = 'uuid'
# Certain types are known not to need updating and should not change
# Resources are identified by uuid, and the corresponding href will have # in front of the uuid string
# Neither of these should change
# If other similar types are found they should be added to the FixedUuidModel typevar to prevent updating
if isinstance(object_of_interest, common.Resource):
pass
elif isinstance(object_of_interest, pydantic.BaseModel):
# fields_set has names of fields set when model was initialized
fields = getattr(object_of_interest, '__fields_set__', None)
for field in fields:
new_object = None
if field == uuid_str:
new_object = str(uuid.uuid4())
uuid_lut[object_of_interest.__dict__[field]] = new_object
else:
new_object, uuid_lut = _regenerate_uuids_in_place(object_of_interest.__dict__[field], uuid_lut)
object_of_interest.__dict__[field] = new_object
elif type(object_of_interest) is list:
new_list = []
for item in object_of_interest:
new_item, uuid_lut = _regenerate_uuids_in_place(item, uuid_lut)
new_list.append(new_item)
object_of_interest = new_list
elif type(object_of_interest) is dict:
new_dict = {}
for key, value in object_of_interest.items():
if key == uuid_str:
new_val = str(uuid.uuid4())
new_dict[uuid_str] = new_val
uuid_lut[value] = new_val
else:
new_value, uuid_lut = _regenerate_uuids_in_place(value, uuid_lut)
new_dict[key] = new_value
object_of_interest = new_dict
return object_of_interest, uuid_lut
def _update_new_uuid_refs(object_of_interest: Any, uuid_lut: Dict[str, str]) -> Tuple[Any, int]:
"""Update all refs to uuids that were changed."""
n_refs_updated = 0
if isinstance(object_of_interest, pydantic.BaseModel):
fields = getattr(object_of_interest, '__fields_set__', None)
for field in fields:
new_object, n_new_updates = _update_new_uuid_refs(object_of_interest.__dict__[field], uuid_lut)
n_refs_updated += n_new_updates
object_of_interest.__dict__[field] = new_object
elif type(object_of_interest) is list:
new_list = []
for item in object_of_interest:
new_item, n_new_updates = _update_new_uuid_refs(item, uuid_lut)
n_refs_updated += n_new_updates
new_list.append(new_item)
object_of_interest = new_list
elif type(object_of_interest) is dict:
new_dict = {}
for key, value in object_of_interest.items():
if isinstance(value, str):
if value in uuid_lut:
new_dict[key] = uuid_lut[value]
n_refs_updated += 1
else:
new_dict[key] = value
else:
new_value, n_new_updates = _update_new_uuid_refs(value, uuid_lut)
n_refs_updated += n_new_updates
new_dict[key] = new_value
object_of_interest = new_dict
elif isinstance(object_of_interest, str):
if object_of_interest in uuid_lut:
n_refs_updated += 1
object_of_interest = uuid_lut[object_of_interest]
return object_of_interest, n_refs_updated
def regenerate_uuids(object_of_interest: Any) -> Tuple[Any, Dict[str, str], int]:
"""Regenerate all uuids in object and update corresponding references.
Find all dicts with key == 'uuid' and replace the value with a new uuid4.
Build a corresponding lookup table as you go, of old:new uuid values.
Then make a second pass through the object and replace all string values
present in the lookup table with the new value.
Args:
object_of_interest: pydantic.BaseModel, list, dict or str will be updated
Returns:
The updated object with new uuid's and refs
The final lookup table of old:new uuid's
A count of the number of refs that were updated
"""
new_object, uuid_lut = _regenerate_uuids_in_place(object_of_interest, {})
new_object, n_refs_updated = _update_new_uuid_refs(new_object, uuid_lut)
return new_object, uuid_lut, n_refs_updated
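# Minimal usage sketch (illustrative only; the uuid strings below are made up).
# The helpers above accept plain dicts as well as pydantic models, so the
# two-pass regeneration can be demonstrated without a full OSCAL object.
def _regenerate_uuids_demo() -> None:
    sample = {
        'uuid': '11111111-1111-4111-8111-111111111111',
        'control': {'uuid': '22222222-2222-4222-8222-222222222222'},
        'link': '11111111-1111-4111-8111-111111111111',  # string ref to the first uuid
    }
    assert has_no_duplicate_values_by_name(sample, 'uuid')
    new_obj, lut, n_refs = regenerate_uuids(sample)
    assert len(lut) == 2           # both uuids were regenerated
    assert n_refs == 1             # the single string ref was remapped
    assert new_obj['link'] == new_obj['uuid']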
| 43.049724 | 112 | 0.681083 |
cb50e81555b89c6d1c894c64edfccfda6a6106cd
| 3,262 |
py
|
Python
|
jaseci_core/jaseci/tests/test_ll_proto.py
|
panikingginoo12/jaseci
|
6659ab3a3edde865e2ff9a8dc6f2c0f98588d05b
|
[
"MIT"
] | null | null | null |
jaseci_core/jaseci/tests/test_ll_proto.py
|
panikingginoo12/jaseci
|
6659ab3a3edde865e2ff9a8dc6f2c0f98588d05b
|
[
"MIT"
] | null | null | null |
jaseci_core/jaseci/tests/test_ll_proto.py
|
panikingginoo12/jaseci
|
6659ab3a3edde865e2ff9a8dc6f2c0f98588d05b
|
[
"MIT"
] | null | null | null |
from jaseci.utils.mem_hook import mem_hook
from jaseci.actor.sentinel import sentinel
from jaseci.graph.graph import graph
from jaseci.utils.utils import TestCaseHelper
from unittest import TestCase
import jaseci.tests.jac_test_code as jtc
import uuid
class jac_tests(TestCaseHelper, TestCase):
"""Unit tests for Jac language"""
def setUp(self):
super().setUp()
self.gph = graph(m_id='anon', h=mem_hook())
self.sent = sentinel(m_id='anon', h=self.gph._h)
def tearDown(self):
super().tearDown()
def test_ll_proto_load(self):
"""Test loading/parsing ll prototype"""
self.sent.register_code(jtc.ll_proto)
self.assertTrue(self.sent.is_active)
def test_rand_generation(self):
"""Test loading/parsing ll prototype"""
self.sent.register_code(jtc.ll_proto)
gen_walker = self.sent.walker_ids.get_obj_by_name('init')
gen_walker.prime(self.gph)
gen_walker.run()
gen_walker = self.sent.walker_ids.get_obj_by_name(
'gen_rand_life', kind='walker')
gen_walker.prime(self.gph.outbound_nodes()[0])
gen_walker.run()
self.assertGreater(len(self.gph._h.mem.keys()), 70)
def test_objects_created(self):
"""Test loading/parsing ll prototype"""
self.sent.register_code(jtc.ll_proto)
gen_walker = self.sent.walker_ids.get_obj_by_name('init')
gen_walker.prime(self.gph)
gen_walker.run()
gen_walker = self.sent.walker_ids.get_obj_by_name(
'gen_rand_life', kind='walker')
gen_walker.prime(self.gph.outbound_nodes()[0])
gen_walker.run()
for i in self.gph._h.mem.keys():
if(i == 'global'):
continue
self.gph._h.mem[i].json()
def test_get_latest_day(self):
"""Test loading/parsing ll prototype"""
self.sent.register_code(jtc.ll_proto)
gen_walker = self.sent.walker_ids.get_obj_by_name('init')
gen_walker.prime(self.gph)
gen_walker.run()
gen_walker = self.sent.walker_ids.get_obj_by_name(
'gen_rand_life', kind='walker')
gen_walker.prime(self.gph.outbound_nodes()[0])
gen_walker.run()
lday_walk = self.sent.walker_ids.get_obj_by_name(
'get_latest_day')
lday_walk.prime(self.gph.outbound_nodes()[0])
lday_walk.run()
ret = lday_walk.context['latest_day']
self.assertEqual(self.gph._h.get_obj(
self.gph._m_id, uuid.UUID(ret)).name, 'day')
def test_carry_forward(self):
"""Test loading/parsing ll prototype"""
self.sent.register_code(jtc.ll_proto)
gen_walker = self.sent.walker_ids.get_obj_by_name('init')
gen_walker.prime(self.gph)
gen_walker.run()
gen_walker = self.sent.walker_ids.get_obj_by_name(
'gen_rand_life', kind='walker')
gen_walker.prime(self.gph)
gen_walker.run()
lday_walk = self.sent.walker_ids.get_obj_by_name(
'get_gen_day')
lday_walk.prime(self.gph)
lday_walk.run()
day = self.gph._h.get_obj(
self.gph._m_id, uuid.UUID(lday_walk.context['day_node']))
self.assertGreater(len(day.edge_ids), 3)
| 36.244444 | 69 | 0.643777 |
d2b6a9f7dd79c655a9660652c362663a58f9580a
| 4,277 |
py
|
Python
|
homeassistant/components/axis/binary_sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023 |
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/axis/binary_sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710 |
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
homeassistant/components/axis/binary_sensor.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956 |
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Support for Axis binary sensors."""
from datetime import timedelta
from axis.event_stream import (
CLASS_INPUT,
CLASS_LIGHT,
CLASS_MOTION,
CLASS_OUTPUT,
CLASS_PTZ,
CLASS_SOUND,
FenceGuard,
LoiteringGuard,
MotionGuard,
ObjectAnalytics,
Vmd4,
)
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util.dt import utcnow
from .axis_base import AxisEventBase
from .const import DOMAIN as AXIS_DOMAIN
DEVICE_CLASS = {
CLASS_INPUT: BinarySensorDeviceClass.CONNECTIVITY,
CLASS_LIGHT: BinarySensorDeviceClass.LIGHT,
CLASS_MOTION: BinarySensorDeviceClass.MOTION,
CLASS_SOUND: BinarySensorDeviceClass.SOUND,
}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up a Axis binary sensor."""
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
@callback
def async_add_sensor(event_id):
"""Add binary sensor from Axis device."""
event = device.api.event[event_id]
if event.CLASS not in (CLASS_OUTPUT, CLASS_PTZ) and not (
event.CLASS == CLASS_LIGHT and event.TYPE == "Light"
):
async_add_entities([AxisBinarySensor(event, device)])
config_entry.async_on_unload(
async_dispatcher_connect(hass, device.signal_new_event, async_add_sensor)
)
class AxisBinarySensor(AxisEventBase, BinarySensorEntity):
"""Representation of a binary Axis event."""
def __init__(self, event, device):
"""Initialize the Axis binary sensor."""
super().__init__(event, device)
self.cancel_scheduled_update = None
self._attr_device_class = DEVICE_CLASS.get(self.event.CLASS)
@callback
def update_callback(self, no_delay=False):
"""Update the sensor's state, if needed.
Parameter no_delay is True when device_event_reachable is sent.
"""
@callback
def scheduled_update(now):
"""Timer callback for sensor update."""
self.cancel_scheduled_update = None
self.async_write_ha_state()
if self.cancel_scheduled_update is not None:
self.cancel_scheduled_update()
self.cancel_scheduled_update = None
if self.is_on or self.device.option_trigger_time == 0 or no_delay:
self.async_write_ha_state()
return
self.cancel_scheduled_update = async_track_point_in_utc_time(
self.hass,
scheduled_update,
utcnow() + timedelta(seconds=self.device.option_trigger_time),
)
@property
def is_on(self):
"""Return true if event is active."""
return self.event.is_tripped
@property
def name(self):
"""Return the name of the event."""
if (
self.event.CLASS == CLASS_INPUT
and self.event.id in self.device.api.vapix.ports
and self.device.api.vapix.ports[self.event.id].name
):
return (
f"{self.device.name} {self.device.api.vapix.ports[self.event.id].name}"
)
if self.event.CLASS == CLASS_MOTION:
for event_class, event_data in (
(FenceGuard, self.device.api.vapix.fence_guard),
(LoiteringGuard, self.device.api.vapix.loitering_guard),
(MotionGuard, self.device.api.vapix.motion_guard),
(ObjectAnalytics, self.device.api.vapix.object_analytics),
(Vmd4, self.device.api.vapix.vmd4),
):
if (
isinstance(self.event, event_class)
and event_data
and self.event.id in event_data
):
return f"{self.device.name} {self.event.TYPE} {event_data[self.event.id].name}"
return self._attr_name
| 31.91791 | 99 | 0.658639 |
ab93b2085547833b423cda098819eb107c821fc8
| 1,055 |
py
|
Python
|
hacker/qqpy/bin/pilfont.py
|
sanyueyuxincao/web-crawling
|
dc134bd6e23572a3ebfd851d0ffb6dd84cb16c1f
|
[
"MIT"
] | null | null | null |
hacker/qqpy/bin/pilfont.py
|
sanyueyuxincao/web-crawling
|
dc134bd6e23572a3ebfd851d0ffb6dd84cb16c1f
|
[
"MIT"
] | null | null | null |
hacker/qqpy/bin/pilfont.py
|
sanyueyuxincao/web-crawling
|
dc134bd6e23572a3ebfd851d0ffb6dd84cb16c1f
|
[
"MIT"
] | 2 |
2019-05-19T08:12:45.000Z
|
2021-08-28T07:16:42.000Z
|
#!/Users/het/Desktop/hacker/qqpy/bin/python3.6
#
# The Python Imaging Library
# $Id$
#
# PIL raster font compiler
#
# history:
# 1997-08-25 fl created
# 2002-03-10 fl use "from PIL import"
#
from __future__ import print_function
import glob
import sys
# drivers
from PIL import BdfFontFile
from PIL import PcfFontFile
VERSION = "0.4"
if len(sys.argv) <= 1:
print("PILFONT", VERSION, "-- PIL font compiler.")
print()
print("Usage: pilfont fontfiles...")
print()
print("Convert given font files to the PIL raster font format.")
print("This version of pilfont supports X BDF and PCF fonts.")
sys.exit(1)
files = []
for f in sys.argv[1:]:
files = files + glob.glob(f)
for f in files:
print(f + "...", end=' ')
try:
fp = open(f, "rb")
try:
p = PcfFontFile.PcfFontFile(fp)
except SyntaxError:
fp.seek(0)
p = BdfFontFile.BdfFontFile(fp)
p.save(f)
except (SyntaxError, IOError):
print("failed")
else:
print("OK")
| 18.189655 | 68 | 0.603791 |
749f7a9d598282af633a0652fad7442d75c78cea
| 1,522 |
py
|
Python
|
data/train/python/749f7a9d598282af633a0652fad7442d75c78ceaurls.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84 |
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/749f7a9d598282af633a0652fad7442d75c78ceaurls.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5 |
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/749f7a9d598282af633a0652fad7442d75c78ceaurls.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24 |
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from uploadFile.views import upload_file, search_file, my_reports, report_view
from userAccount.views import create_account, auth_view, user_home, logout, manage_user, manage_group, my_groups, \
manage_reports, change_password
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'Demo.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^upload/$', upload_file),
url(r'^search/$', search_file),
url(r'^view_report/(?P<id>[0-9]+)/$', report_view),
url(r'^create_account/$', create_account),
url(r'^auth/$', auth_view),
url(r'^user_home/$', user_home),
url(r'^logout/$', logout),
url(r'^manage_user/$', manage_user),
url(r'^manage_group/$', manage_group),
url(r'^manage_reports/$', manage_reports),
url(r'^my_reports/$', my_reports),
url(r'^my_groups/$', my_groups),
url(r'^change_password/$', change_password),
) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 50.733333 | 115 | 0.532852 |
ea5374754678fb3f9d0949ba9f4625f1b130479a
| 9,271 |
py
|
Python
|
sysinv/sysinv/sysinv/sysinv/puppet/openstack.py
|
MarioCarrilloA/config
|
06a6f142d154970ce658e979822cd84ce447f612
|
[
"Apache-2.0"
] | null | null | null |
sysinv/sysinv/sysinv/sysinv/puppet/openstack.py
|
MarioCarrilloA/config
|
06a6f142d154970ce658e979822cd84ce447f612
|
[
"Apache-2.0"
] | null | null | null |
sysinv/sysinv/sysinv/sysinv/puppet/openstack.py
|
MarioCarrilloA/config
|
06a6f142d154970ce658e979822cd84ce447f612
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2017 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import abc
import keyring
from sysinv.common import constants
from sysinv.puppet import base
from sysinv.helm import common
class OpenstackBasePuppet(base.BasePuppet):
def _get_service_config(self, service):
configs = self.context.setdefault('_service_configs', {})
if service not in configs:
configs[service] = self._get_service(service)
return configs[service]
def _get_service_parameter_configs(self, service):
configs = self.context.setdefault('_service_params', {})
if service not in configs:
params = self._get_service_parameters(service)
if params:
configs[service] = params
else:
return None
return configs[service]
def _get_admin_user_name(self):
return self._operator.keystone.get_admin_user_name()
def _get_service_password(self, service):
passwords = self.context.setdefault('_service_passwords', {})
if service not in passwords:
passwords[service] = self._get_keyring_password(
service,
self.DEFAULT_SERVICE_PROJECT_NAME)
return passwords[service]
def _get_service_user_name(self, service):
if self._region_config():
service_config = self._get_service_config(service)
if (service_config is not None and
'user_name' in service_config.capabilities):
return service_config.capabilities.get('user_name')
return '%s' % service
def _to_create_services(self):
if self._region_config():
service_config = self._get_service_config(
self._operator.keystone.SERVICE_NAME)
if (service_config is not None and
'region_services_create' in service_config.capabilities):
return service_config.capabilities.get('region_services_create')
return True
# Once we no longer create duplicated endpoints for shared services
    # on the secondary region, this function can be removed.
def _get_public_url_from_service_config(self, service):
url = ''
service_config = self._get_service_config(service)
if (service_config is not None and
'public_uri' in service_config.capabilities):
url = service_config.capabilities.get('public_uri')
if url:
protocol = self._get_public_protocol()
old_protocol = url.split(':')[0]
url = url.replace(old_protocol, protocol, 1)
return url
def _get_admin_url_from_service_config(self, service):
url = ''
service_config = self._get_service_config(service)
if (service_config is not None and
'admin_uri' in service_config.capabilities):
url = service_config.capabilities.get('admin_uri')
return url
def _get_internal_url_from_service_config(self, service):
url = ''
service_config = self._get_service_config(service)
if (service_config is not None and
'internal_uri' in service_config.capabilities):
url = service_config.capabilities.get('internal_uri')
return url
def _get_database_password(self, service):
passwords = self.context.setdefault('_database_passwords', {})
if service not in passwords:
passwords[service] = self._get_keyring_password(service,
'database')
return passwords[service]
def _get_database_username(self, service):
return 'admin-%s' % service
def _get_keyring_password(self, service, user):
password = keyring.get_password(service, user)
if not password:
password = self._generate_random_password()
keyring.set_password(service, user, password)
return password
def _get_public_protocol(self):
return 'https' if self._https_enabled() else 'http'
def _get_service_default_dns_name(self, service):
return "{}.{}.svc.{}".format(service, common.HELM_NS_OPENSTACK,
constants.DEFAULT_DNS_SERVICE_DOMAIN)
def _get_private_protocol(self):
return 'http'
def _format_public_endpoint(self, port, address=None, path=None):
protocol = self._get_public_protocol()
if address is None:
address = self._format_url_address(self._get_oam_address())
return self._format_keystone_endpoint(protocol, port, address, path)
def _format_private_endpoint(self, port, address=None, path=None):
protocol = self._get_private_protocol()
if address is None:
address = self._format_url_address(self._get_management_address())
return self._format_keystone_endpoint(protocol, port, address, path)
def _keystone_auth_address(self):
return self._operator.keystone.get_auth_address()
def _keystone_auth_host(self):
return self._operator.keystone.get_auth_host()
def _keystone_auth_port(self):
return self._operator.keystone.get_auth_port()
def _keystone_auth_uri(self):
return self._operator.keystone.get_auth_uri()
def _keystone_identity_uri(self):
return self._operator.keystone.get_identity_uri()
def _keystone_region_name(self):
return self._operator.keystone._identity_specific_region_name()
def _get_service_region_name(self, service):
if self._region_config():
service_config = self._get_service_config(service)
if (service_config is not None and
service_config.region_name is not None):
return service_config.region_name
if (self._distributed_cloud_role() ==
constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER and
service in self.SYSTEM_CONTROLLER_SERVICES):
return constants.SYSTEM_CONTROLLER_REGION
return self._region_name()
def _get_swift_service_tenant_name(self):
return self._get_swift_service_project_name()
def _get_service_tenant_name(self):
return self._get_service_project_name()
def _get_configured_service_name(self, service, version=None):
if self._region_config():
service_config = self._get_service_config(service)
if service_config is not None:
name = 'service_name'
if version is not None:
name = version + '_' + name
service_name = service_config.capabilities.get(name)
if service_name is not None:
return service_name
elif version is not None:
return service + version
else:
return service
def _get_configured_service_type(self, service, version=None):
if self._region_config():
service_config = self._get_service_config(service)
if service_config is not None:
stype = 'service_type'
if version is not None:
stype = version + '_' + stype
return service_config.capabilities.get(stype)
return None
def _get_swift_service_user_domain_name(self):
return self._operator.keystone.get_swift_service_user_domain()
def _get_service_user_domain_name(self):
return self._operator.keystone.get_service_user_domain()
def _get_service_project_domain_name(self):
return self._operator.keystone.get_service_project_domain()
# Get SystemController's address of DistributedCloud.
def _get_system_controller_addr(self):
sys_controller_network = self.dbapi.network_get_by_type(
constants.NETWORK_TYPE_SYSTEM_CONTROLLER)
sys_controller_network_addr_pool = self.dbapi.address_pool_get(
sys_controller_network.pool_uuid)
addr = sys_controller_network_addr_pool.floating_address
return addr
@staticmethod
def _format_keystone_endpoint(protocol, port, address, path):
url = "%s://%s:%s" % (protocol, str(address), str(port))
if path is None:
return url
else:
return "%s/%s" % (url, path)
def _format_database_connection(self, service,
address=None, database=None):
if not address:
address = self._get_management_address()
if not database:
database = service
return "postgresql://%s:%s@%s/%s" % (
self._get_database_username(service),
self._get_database_password(service),
self._format_url_address(address),
database)
@abc.abstractmethod
def get_public_url(self):
"""Return the public endpoint URL for the service"""
raise NotImplementedError()
@abc.abstractmethod
def get_internal_url(self):
"""Return the internal endpoint URL for the service"""
raise NotImplementedError()
@abc.abstractmethod
def get_admin_url(self):
"""Return the admin endpoint URL for the service"""
raise NotImplementedError()
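# Minimal usage sketch (illustrative only; the address and port are made up).
# The static endpoint formatter can be exercised directly without instantiating
# a concrete puppet operator.
def _format_keystone_endpoint_demo() -> str:
    # -> 'https://10.10.10.2:5000/v3'
    return OpenstackBasePuppet._format_keystone_endpoint('https', 5000, '10.10.10.2', 'v3')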
| 37.383065 | 80 | 0.653328 |
97c2e0e9f5b016e2f11bf7b13558595494c840ad
| 2,219 |
py
|
Python
|
IPython/html/widgets/widget_int.py
|
ptone/ipython
|
b91d6a658d4526746dcbfb62e653d71c5d84eee9
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
IPython/html/widgets/widget_int.py
|
ptone/ipython
|
b91d6a658d4526746dcbfb62e653d71c5d84eee9
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
IPython/html/widgets/widget_int.py
|
ptone/ipython
|
b91d6a658d4526746dcbfb62e653d71c5d84eee9
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
"""IntWidget class.
Represents an unbounded int using a widget.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .widget import DOMWidget
from IPython.utils.traitlets import Unicode, CInt, Bool, List, Enum
#-----------------------------------------------------------------------------
# Classes
#-----------------------------------------------------------------------------
class _IntWidget(DOMWidget):
value = CInt(0, help="Int value", sync=True)
disabled = Bool(False, help="Enable or disable user changes", sync=True)
description = Unicode(help="Description of the value this widget represents", sync=True)
class _BoundedIntWidget(_IntWidget):
step = CInt(1, help="Minimum step that the value can take (ignored by some views)", sync=True)
max = CInt(100, help="Max value", sync=True)
min = CInt(0, help="Min value", sync=True)
def __init__(self, *pargs, **kwargs):
"""Constructor"""
DOMWidget.__init__(self, *pargs, **kwargs)
self.on_trait_change(self._validate, ['value', 'min', 'max'])
def _validate(self, name, old, new):
"""Validate value, max, min."""
if self.min > new or new > self.max:
self.value = min(max(new, self.min), self.max)
class IntTextWidget(_IntWidget):
_view_name = Unicode('IntTextView', sync=True)
class BoundedIntTextWidget(_BoundedIntWidget):
_view_name = Unicode('IntTextView', sync=True)
class IntSliderWidget(_BoundedIntWidget):
_view_name = Unicode('IntSliderView', sync=True)
orientation = Enum([u'horizontal', u'vertical'], u'horizontal',
help="Vertical or horizontal.", sync=True)
class IntProgressWidget(_BoundedIntWidget):
_view_name = Unicode('ProgressView', sync=True)
| 36.983333 | 98 | 0.547093 |
58b9876cad961b1de557b34b2cc972c55d636091
| 14,053 |
py
|
Python
|
SpoTwillio/lib/python3.6/site-packages/twilio/rest/api/v2010/account/transcription.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 3 |
2019-11-12T07:55:51.000Z
|
2020-04-01T11:19:18.000Z
|
SpoTwillio/lib/python3.6/site-packages/twilio/rest/api/v2010/account/transcription.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 7 |
2020-06-06T01:06:19.000Z
|
2022-02-10T11:15:14.000Z
|
SpoTwillio/lib/python3.6/site-packages/twilio/rest/api/v2010/account/transcription.py
|
Natfan/funlittlethings
|
80d5378b45b5c0ead725942ee50403bd057514a6
|
[
"MIT"
] | 2 |
2019-10-20T14:54:47.000Z
|
2020-06-11T07:29:37.000Z
|
# coding=utf-8
"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class TranscriptionList(ListResource):
def __init__(self, version, account_sid):
"""
Initialize the TranscriptionList
:param Version version: Version that contains the resource
:param account_sid: The unique sid that identifies this account
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionList
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionList
"""
super(TranscriptionList, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
}
self._uri = '/Accounts/{account_sid}/Transcriptions.json'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams TranscriptionInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.transcription.TranscriptionInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(
page_size=limits['page_size'],
)
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, limit=None, page_size=None):
"""
Lists TranscriptionInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.transcription.TranscriptionInstance]
"""
return list(self.stream(
limit=limit,
page_size=page_size,
))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of TranscriptionInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionPage
"""
params = values.of({
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return TranscriptionPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a TranscriptionContext
:param sid: Fetch by unique transcription Sid
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionContext
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
"""
return TranscriptionContext(
self._version,
account_sid=self._solution['account_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a TranscriptionContext
:param sid: Fetch by unique transcription Sid
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionContext
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
"""
return TranscriptionContext(
self._version,
account_sid=self._solution['account_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.TranscriptionList>'
class TranscriptionPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the TranscriptionPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The unique sid that identifies this account
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionPage
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionPage
"""
super(TranscriptionPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of TranscriptionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
return TranscriptionInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.TranscriptionPage>'
class TranscriptionContext(InstanceContext):
def __init__(self, version, account_sid, sid):
"""
Initialize the TranscriptionContext
:param Version version: Version that contains the resource
:param account_sid: The account_sid
:param sid: Fetch by unique transcription Sid
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionContext
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
"""
super(TranscriptionContext, self).__init__(version)
# Path Solution
self._solution = {
'account_sid': account_sid,
'sid': sid,
}
self._uri = '/Accounts/{account_sid}/Transcriptions/{sid}.json'.format(**self._solution)
def fetch(self):
"""
Fetch a TranscriptionInstance
:returns: Fetched TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return TranscriptionInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the TranscriptionInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.TranscriptionContext {}>'.format(context)
class TranscriptionInstance(InstanceResource):
class Status(object):
IN_PROGRESS = "in-progress"
COMPLETED = "completed"
FAILED = "failed"
def __init__(self, version, payload, account_sid, sid=None):
"""
Initialize the TranscriptionInstance
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
super(TranscriptionInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload['account_sid'],
'api_version': payload['api_version'],
'date_created': deserialize.rfc2822_datetime(payload['date_created']),
'date_updated': deserialize.rfc2822_datetime(payload['date_updated']),
'duration': payload['duration'],
'price': deserialize.decimal(payload['price']),
'price_unit': payload['price_unit'],
'recording_sid': payload['recording_sid'],
'sid': payload['sid'],
'status': payload['status'],
'transcription_text': payload['transcription_text'],
'type': payload['type'],
'uri': payload['uri'],
}
# Context
self._context = None
self._solution = {
'account_sid': account_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: TranscriptionContext for this TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
"""
if self._context is None:
self._context = TranscriptionContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The unique sid that identifies this account
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The api_version
:rtype: unicode
"""
return self._properties['api_version']
@property
def date_created(self):
"""
:returns: The date this resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def duration(self):
"""
:returns: The duration of the transcribed audio, in seconds.
:rtype: unicode
"""
return self._properties['duration']
@property
def price(self):
"""
:returns: The charge for this transcription
:rtype: unicode
"""
return self._properties['price']
@property
def price_unit(self):
"""
:returns: The currency in which Price is measured
:rtype: unicode
"""
return self._properties['price_unit']
@property
def recording_sid(self):
"""
:returns: The string that uniquely identifies the recording
:rtype: unicode
"""
return self._properties['recording_sid']
@property
def sid(self):
"""
:returns: A string that uniquely identifies this transcription
:rtype: unicode
"""
return self._properties['sid']
@property
def status(self):
"""
:returns: The status of the transcription
:rtype: TranscriptionInstance.Status
"""
return self._properties['status']
@property
def transcription_text(self):
"""
:returns: The text content of the transcription.
:rtype: unicode
"""
return self._properties['transcription_text']
@property
def type(self):
"""
:returns: The type
:rtype: unicode
"""
return self._properties['type']
@property
def uri(self):
"""
:returns: The URI for this resource
:rtype: unicode
"""
return self._properties['uri']
def fetch(self):
"""
Fetch a TranscriptionInstance
:returns: Fetched TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the TranscriptionInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.TranscriptionInstance {}>'.format(context)
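# Minimal usage sketch (illustrative only). The credentials are placeholders, and
# the `client.transcriptions` shortcut is assumed to proxy to this account-scoped
# list resource in the twilio-python helper library.
def _transcription_usage_demo(account_sid, auth_token):
    from twilio.rest import Client
    client = Client(account_sid, auth_token)
    # list() loads records eagerly; stream() pages through them lazily
    for transcription in client.transcriptions.list(limit=5):
        print(transcription.sid, transcription.status, transcription.duration)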
| 31.579775 | 96 | 0.611186 |
63d3491390bafef125136c3a96eb9b6a26127b0f
| 1,317 |
py
|
Python
|
flow/utils/runningstat.py
|
mepear/flow
|
4fc6ceaf64ca522b5a5c4104a3098b20cf207dd4
|
[
"MIT"
] | 1 |
2021-03-05T07:39:51.000Z
|
2021-03-05T07:39:51.000Z
|
flow/utils/runningstat.py
|
mepear/flow
|
4fc6ceaf64ca522b5a5c4104a3098b20cf207dd4
|
[
"MIT"
] | 1 |
2021-09-13T02:16:02.000Z
|
2021-09-13T02:16:02.000Z
|
flow/utils/runningstat.py
|
mepear/flow
|
4fc6ceaf64ca522b5a5c4104a3098b20cf207dd4
|
[
"MIT"
] | 1 |
2021-08-21T13:58:30.000Z
|
2021-08-21T13:58:30.000Z
|
import numpy as np
class RunningStat(object):
'''
Keeps track of first and second moments (mean and variance)
of a streaming time series.
Taken from https://github.com/joschu/modular_rl
Math in http://www.johndcook.com/blog/standard_deviation/
'''
def __init__(self, shape):
self._n = 0
self._M = np.zeros(shape)
# self._S = np.zeros(shape)
self._STD = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM) / self._n
# self._S[...] = self._S + (x - oldM) * (x - self._M)
old_std = self._STD.copy()
self._STD[...] = np.sqrt(np.square(self._STD) + ((x - oldM) * (x - self._M) - np.square(old_std)) / (self._n - 1))
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
# @property
# def var(self):
# return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
# return np.sqrt(self.var)
return self._STD
@property
def shape(self):
return self._M.shape
| 29.931818 | 126 | 0.53303 |
d0b48b9b14b2fb44169bbe77948eefda93dfbc5b
| 5,062 |
py
|
Python
|
src/spaceone/inventory/libs/schema/metadata/dynamic_layout.py
|
jihyungSong/plugin-aws-personal-health-dashboard
|
5bef4e79d1ee7c27b9231bbe79389218951929c9
|
[
"Apache-2.0"
] | 1 |
2020-11-16T10:52:02.000Z
|
2020-11-16T10:52:02.000Z
|
src/spaceone/inventory/libs/schema/metadata/dynamic_layout.py
|
jihyungSong/plugin-aws-personal-health-dashboard
|
5bef4e79d1ee7c27b9231bbe79389218951929c9
|
[
"Apache-2.0"
] | 2 |
2022-03-24T05:09:23.000Z
|
2022-03-28T06:33:01.000Z
|
src/spaceone/inventory/libs/schema/metadata/dynamic_layout.py
|
jihyungSong/plugin-aws-personal-health-dashboard
|
5bef4e79d1ee7c27b9231bbe79389218951929c9
|
[
"Apache-2.0"
] | 2 |
2021-02-18T07:56:17.000Z
|
2021-03-23T01:53:40.000Z
|
from schematics import Model
from schematics.types import StringType, PolyModelType, ListType
from spaceone.inventory.libs.schema.metadata.dynamic_field import BaseDynamicField, TextDyField
class LayoutOptions(Model):
class Options:
serialize_when_none = False
root_path = StringType(serialize_when_none=False)
class BaseLayoutField(Model):
@staticmethod
def _set_fields(fields=[], **kwargs):
_options = {'fields': fields}
for k, v in kwargs.items():
if v is not None:
_options[k] = v
return _options
name = StringType(default='')
type = StringType(default="item",
choices=("item", "table", "query-search-table", "simple-table", "list", "raw", "html"))
options = PolyModelType(LayoutOptions, serialize_when_none=False)
class ItemLayoutOption(LayoutOptions):
fields = ListType(PolyModelType(BaseDynamicField))
class SimpleTableLayoutOption(LayoutOptions):
fields = ListType(PolyModelType(BaseDynamicField))
class TableLayoutOption(LayoutOptions):
fields = ListType(PolyModelType(BaseDynamicField))
class QuerySearchTableLayoutOption(LayoutOptions):
fields = ListType(PolyModelType(BaseDynamicField))
class RawLayoutOption(LayoutOptions):
class Options:
serialize_when_none = False
class HTMLLayoutOption(LayoutOptions):
class Options:
serialize_when_none = False
class ListLayoutOption(LayoutOptions):
layouts = ListType(PolyModelType(BaseLayoutField))
class ItemDynamicLayout(BaseLayoutField):
type = StringType(default='item')
options = PolyModelType(ItemLayoutOption)
@classmethod
def set(cls, name='', root_path=''):
return cls({'name': name, 'options': ItemLayoutOption({'root_path': root_path})})
@classmethod
def set_fields(cls, name='', root_path=None, fields=[]):
_options = cls._set_fields(fields, root_path=root_path)
return cls({'name': name, 'options': ItemLayoutOption(_options)})
class TableDynamicLayout(BaseLayoutField):
type = StringType(default='table')
options = PolyModelType(TableLayoutOption)
@classmethod
def set(cls, name='', root_path=''):
return cls(name=name, root_path=root_path, options=TableLayoutOption({'root_path': root_path}))
@classmethod
def set_fields(cls, name='', root_path=None, fields=[]):
_options = cls._set_fields(fields, root_path=root_path)
return cls({'name': name, 'options': TableLayoutOption(_options)})
class QuerySearchTableDynamicLayout(BaseLayoutField):
type = StringType(default='query-search-table')
options = PolyModelType(QuerySearchTableLayoutOption)
@classmethod
def set(cls, name=''):
return cls(name=name, options=QuerySearchTableLayoutOption())
@classmethod
def set_fields(cls, name='', fields=[]):
_options = cls._set_fields(fields)
return cls({'name': name, 'options': QuerySearchTableLayoutOption(_options)})
class SimpleTableDynamicLayout(BaseLayoutField):
type = StringType(default='simple-table')
options = PolyModelType(SimpleTableLayoutOption)
@classmethod
def set(cls, name='', root_path=''):
return cls({'name': name, 'options': SimpleTableLayoutOption({'root_path': root_path})})
@classmethod
def set_fields(cls, name='', root_path=None, fields=[]):
_options = cls._set_fields(fields, root_path=root_path)
return cls({'name': name, 'options': SimpleTableLayoutOption(_options)})
@classmethod
def set_tags(cls, name='Tags', root_path='data.tags', fields=None):
if fields is None:
fields = [
TextDyField.data_source('Key', 'key'),
TextDyField.data_source('Value', 'value'),
]
return cls.set_fields(name, root_path, fields)
class ListDynamicLayout(BaseLayoutField):
type = StringType(default='list')
options = PolyModelType(ListLayoutOption)
@classmethod
def set(cls, name='', layouts=[]):
return cls(name=name, options=ListLayoutOption({'layouts': layouts}))
@classmethod
def set_layouts(cls, name='', layouts=[]):
return cls({'name': name, 'options': ListLayoutOption({'layouts': layouts})})
class RawDynamicLayout(BaseLayoutField):
type = StringType(default='raw')
options = PolyModelType(RawLayoutOption)
@classmethod
def set(cls, name='', root_path=None):
if root_path is None:
_options = RawLayoutOption()
else:
_options = RawLayoutOption({'root_path': root_path})
return cls({'name': name, 'options': _options})
class HTMLDynamicLayout(BaseLayoutField):
type = StringType(default='html')
options = PolyModelType(HTMLLayoutOption)
@classmethod
def set(cls, name='', root_path=None):
if root_path is None:
_options = HTMLLayoutOption()
else:
_options = HTMLLayoutOption({'root_path': root_path})
return cls({'name': name, 'options': _options})
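# Minimal usage sketch (illustrative only; the field names and root paths are
# made up). Layouts compose the same way set_tags() does above.
def _dynamic_layout_demo():
    detail = ItemDynamicLayout.set_fields('Instance', 'data', fields=[
        TextDyField.data_source('Name', 'name'),
        TextDyField.data_source('State', 'state'),
    ])
    tags = SimpleTableDynamicLayout.set_tags()
    return ListDynamicLayout.set_layouts('Overview', layouts=[detail, tags])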
| 31.440994 | 109 | 0.682537 |
718cfae0263f8452733b81f0338d1bc44555bfd9
| 5,642 |
py
|
Python
|
core/ycheck/bugs.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | null | null | null |
core/ycheck/bugs.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | null | null | null |
core/ycheck/bugs.py
|
aserdean/hotsos
|
a0f17a7ee2f08a4da0a269d478dec7ebb8f12493
|
[
"Apache-2.0"
] | null | null | null |
from core import constants
from core.log import log
from core.checks import DPKGVersionCompare
from core.known_bugs_utils import add_known_bug
from core.ycheck import (
YDefsLoader,
YDefsSection,
AutoChecksBase,
)
from core.searchtools import FileSearcher, SearchDef
class YBugChecker(AutoChecksBase):
"""
Class used to identify bugs by matching content from files or commands.
Searches are defined in defs/bugs.yaml per plugin and run automatically.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, searchobj=FileSearcher(), **kwargs)
self._bug_defs = None
def _load_bug_definitions(self):
""" Load bug search definitions from yaml """
plugin_bugs = YDefsLoader('bugs').load_plugin_defs()
if not plugin_bugs:
return
ybugchecks = YDefsSection(constants.PLUGIN_NAME, plugin_bugs)
log.debug("loaded plugin '%s' bugs - sections=%s, events=%s",
ybugchecks.name,
len(ybugchecks.branch_sections),
len(ybugchecks.leaf_sections))
if ybugchecks.requires and not ybugchecks.requires.passes:
log.debug("plugin not runnable - skipping bug checks")
return
bug_defs = []
for bug in ybugchecks.leaf_sections:
bdef = {'bug_id': str(bug.name),
'context': bug.context,
'settings': bug.settings,
'message': bug.raises.message,
'message_format_result_groups': bug.raises.format_groups}
if bug.expr:
pattern = bug.expr.value
datasource = bug.input.path
searchdef = SearchDef(pattern,
tag=bdef['bug_id'],
hint=bug.hint.value)
bdef['searchdef'] = searchdef
bdef['datasource'] = datasource
log.debug("bug=%s path=%s", bdef['bug_id'], bdef.get('datasource'))
bug_defs.append(bdef)
self._bug_defs = bug_defs
@property
def bug_definitions(self):
"""
@return: dict of SearchDef objects and datasource for all entries in
bugs.yaml under _yaml_defs_group.
"""
if self._bug_defs is not None:
return self._bug_defs
self._load_bug_definitions()
return self._bug_defs
def load(self):
if not self.bug_definitions:
return
for bugsearch in self.bug_definitions:
if 'searchdef' in bugsearch:
self.searchobj.add_search_term(bugsearch['searchdef'],
bugsearch['datasource'])
def package_has_bugfix(self, pkg_version, versions_affected):
for item in sorted(versions_affected, key=lambda i: i['min-fixed'],
reverse=True):
min_fixed = item['min-fixed']
min_broken = item['min-broken']
lt_fixed = pkg_version < DPKGVersionCompare(min_fixed)
if min_broken:
lt_broken = pkg_version < DPKGVersionCompare(min_broken)
else:
lt_broken = None
if lt_broken:
continue
if lt_fixed:
return False
else:
return True
return True
def get_format_list(self, result_group_indexes, search_result):
values = []
for idx in result_group_indexes:
values.append(search_result.get(idx))
return values
def run(self, results):
if not self.bug_definitions:
return
for bugsearch in self.bug_definitions:
format_dict = {}
format_list = []
bug_id = bugsearch['bug_id']
settings = bugsearch['settings']
if settings and settings.versions_affected and settings.package:
pkg = settings.package
pkg_ver = bugsearch['context'].apt_all.get(pkg)
if pkg_ver:
if self.package_has_bugfix(pkg_ver,
settings.versions_affected):
# No need to search since the bug is fixed.
log.debug('bug %s already fixed in package %s version '
'%s - skipping check', bug_id, pkg, pkg_ver)
continue
format_dict = {'package_name': pkg,
'version_current': pkg_ver}
else:
log.debug("package %s not installed - skipping check", pkg)
continue
message = bugsearch['message']
if 'searchdef' in bugsearch:
bug_matches = results.find_by_tag(bug_id)
if not bug_matches:
continue
indexes = bugsearch['message_format_result_groups']
if indexes:
# we only use the first result
first_match = bug_matches[0]
format_list = self.get_format_list(indexes,
first_match)
log.debug("bug %s identified", bug_id)
if format_list:
add_known_bug(bug_id, message.format(*format_list))
elif format_dict:
log.debug(message.format(**format_dict))
add_known_bug(bug_id, message.format(**format_dict))
else:
add_known_bug(bug_id, message)
| 36.636364 | 79 | 0.546083 |
c39cb6a07546dd686e33bc6d24a298c2cde6bec0
| 2,620 |
py
|
Python
|
openpeerpower/components/epsonworkforce/sensor.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1 |
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
openpeerpower/components/epsonworkforce/sensor.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47 |
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
openpeerpower/components/epsonworkforce/sensor.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Support for Epson Workforce Printer."""
from datetime import timedelta
from epsonprinter_pkg.epsonprinterapi import EpsonPrinterAPI
import voluptuous as vol
from openpeerpower.components.sensor import PLATFORM_SCHEMA, SensorEntity
from openpeerpower.const import CONF_HOST, CONF_MONITORED_CONDITIONS, PERCENTAGE
from openpeerpower.exceptions import PlatformNotReady
import openpeerpower.helpers.config_validation as cv
MONITORED_CONDITIONS = {
"black": ["Ink level Black", PERCENTAGE, "mdi:water"],
"photoblack": ["Ink level Photoblack", PERCENTAGE, "mdi:water"],
"magenta": ["Ink level Magenta", PERCENTAGE, "mdi:water"],
"cyan": ["Ink level Cyan", PERCENTAGE, "mdi:water"],
"yellow": ["Ink level Yellow", PERCENTAGE, "mdi:water"],
"clean": ["Cleaning level", PERCENTAGE, "mdi:water"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]
),
}
)
SCAN_INTERVAL = timedelta(minutes=60)
def setup_platform(opp, config, add_devices, discovery_info=None):
"""Set up the cartridge sensor."""
host = config.get(CONF_HOST)
api = EpsonPrinterAPI(host)
if not api.available:
raise PlatformNotReady()
sensors = [
EpsonPrinterCartridge(api, condition)
for condition in config[CONF_MONITORED_CONDITIONS]
]
add_devices(sensors, True)
class EpsonPrinterCartridge(SensorEntity):
"""Representation of a cartridge sensor."""
def __init__(self, api, cartridgeidx):
"""Initialize a cartridge sensor."""
self._api = api
self._id = cartridgeidx
self._name = MONITORED_CONDITIONS[self._id][0]
self._unit = MONITORED_CONDITIONS[self._id][1]
self._icon = MONITORED_CONDITIONS[self._id][2]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
@property
def state(self):
"""Return the state of the device."""
return self._api.getSensorValue(self._id)
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._api.available
def update(self):
"""Get the latest data from the Epson printer."""
self._api.update()
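# Illustrative configuration (the host address is made up). PLATFORM_SCHEMA above
# validates a dict of this shape; the equivalent configuration.yaml entry would
# use `platform: epsonworkforce` with the same keys.
_EXAMPLE_CONFIG = {
    CONF_HOST: "192.168.1.50",
    CONF_MONITORED_CONDITIONS: ["black", "magenta", "cyan", "yellow"],
}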
| 30.114943 | 80 | 0.674046 |
b01f4ded78db9a6b7ea6f76cc6ce4fd8e3006101
| 2,626 |
py
|
Python
|
uctt_cli/__init__.py
|
james-nesbitt/uc
|
6bbbdae03c523081e3c70e9514aa45ab539e49fe
|
[
"MIT"
] | 1 |
2021-02-07T18:47:50.000Z
|
2021-02-07T18:47:50.000Z
|
uctt_cli/__init__.py
|
james-nesbitt/uctt
|
6bbbdae03c523081e3c70e9514aa45ab539e49fe
|
[
"MIT"
] | null | null | null |
uctt_cli/__init__.py
|
james-nesbitt/uctt
|
6bbbdae03c523081e3c70e9514aa45ab539e49fe
|
[
"MIT"
] | null | null | null |
"""
UCTT Cli package
"""
import logging
from uctt.plugin import Type, Factory
from uctt.environment import Environment
from .info import InfoCliPlugin
from .config import ConfigCliPlugin
from .environment import EnvironmentCliPlugin
from .fixtures import FixturesCliPlugin
from .output import OutputCliPlugin
from .provisioner import ProvisionerCliPlugin
logger = logging.getLogger('uctt.cli')
UCTT_PLUGIN_ID_CLI_INFO = 'info'
""" cli plugin_id for the info plugin """
@Factory(type=Type.CLI, plugin_id=UCTT_PLUGIN_ID_CLI_INFO)
def uctt_plugin_factory_cli_info(
environment: Environment, instance_id: str = ''):
""" create an info cli plugin """
return InfoCliPlugin(environment, instance_id)
UCTT_PLUGIN_ID_CLI_CONFIG = 'config'
""" cli plugin_id for the config plugin """
@Factory(type=Type.CLI, plugin_id=UCTT_PLUGIN_ID_CLI_CONFIG)
def uctt_plugin_factory_cli_config(
environment: Environment, instance_id: str = ''):
""" create a config cli plugin """
return ConfigCliPlugin(environment, instance_id)
UCTT_PLUGIN_ID_CLI_ENVIRONMENT = 'environment'
""" cli plugin_id for the environment plugin """
@Factory(type=Type.CLI, plugin_id=UCTT_PLUGIN_ID_CLI_ENVIRONMENT)
def uctt_plugin_factory_cli_environment(
environment: Environment, instance_id: str = ''):
""" create an environment cli plugin """
return EnvironmentCliPlugin(environment, instance_id)
UCTT_PLUGIN_ID_CLI_FIXTURES = 'fixtures'
""" cli plugin_id for the fixtures plugin """
@Factory(type=Type.CLI, plugin_id=UCTT_PLUGIN_ID_CLI_FIXTURES)
def uctt_plugin_factory_cli_fixtures(
environment: Environment, instance_id: str = ''):
""" create a fixtures cli plugin """
return FixturesCliPlugin(environment, instance_id)
UCTT_PLUGIN_ID_CLI_OUTPUT = 'output'
""" cli plugin_id for the output plugin """
@Factory(type=Type.CLI, plugin_id=UCTT_PLUGIN_ID_CLI_OUTPUT)
def uctt_plugin_factory_output_config(
environment: Environment, instance_id: str = ''):
""" create a output cli plugin """
return OutputCliPlugin(environment, instance_id)
UCTT_PLUGIN_ID_CLI_PROVISIONER = 'provisioner'
""" cli plugin_id for the provisioner plugin """
@Factory(type=Type.CLI, plugin_id=UCTT_PLUGIN_ID_CLI_PROVISIONER)
def uctt_plugin_factory_provisioner_config(
environment: Environment, instance_id: str = ''):
""" create a provisioner cli plugin """
return ProvisionerCliPlugin(environment, instance_id)
""" SetupTools EntryPoint BootStrapping """
def bootstrap(environment: Environment):
""" UCTT Bootstrapper - don't actually do anything """
pass
| 28.236559 | 65 | 0.760853 |
4d5496e966038e533f6f785c3c07d826424834fa
| 19,796 |
py
|
Python
|
sed_config.py
|
python-enthusiasm/c3po
|
d3cee842170659526c7a8463e17105d63af479a6
|
[
"MIT"
] | null | null | null |
sed_config.py
|
python-enthusiasm/c3po
|
d3cee842170659526c7a8463e17105d63af479a6
|
[
"MIT"
] | null | null | null |
sed_config.py
|
python-enthusiasm/c3po
|
d3cee842170659526c7a8463e17105d63af479a6
|
[
"MIT"
] | null | null | null |
# Functions for SED fitting
import os
import pickle
from time import time
from string import ascii_uppercase as UPPERS
import numpy as np
from scipy import integrate
from scipy import constants
from scipy.interpolate import interp1d as interp
from scipy.interpolate import UnivariateSpline as uni_spline
import matplotlib.pyplot as plt
from astropy.io import ascii
################################################################################
#### BEGIN OPTIONS
################################################################################
# These should be the only directories you have to change.
HOME_DIR = '/Users/lacc/Documents/Justin/c3po/'
FPATH = HOME_DIR + 'Temperatures/'
ARR_DIR = HOME_DIR + 'Arrays/'
GRAINSIZES = np.loadtxt(ARR_DIR+'GrainSizes.dat')
WAVELENGTHS = np.loadtxt(ARR_DIR+'Wavelengths.dat')
STAR_TEMPS = np.linspace(2000, 15000, 14)
DISK_RADII = np.logspace(-1, 3, 121)
WAVES = np.logspace(-1, 3, 1000)
grainComps = ['AstroSil', 'DirtyIceAstroSil']
GRAIN_TEMPS_TOTAL = dict()
EMISSIVITIES_TOTAL = dict()
for grain in grainComps:
GRAIN_TEMPS_TOTAL[grain] = np.load(ARR_DIR+grain+'GrainTemps.npy')
EMISSIVITIES_TOTAL[grain] = np.load(ARR_DIR+grain+'Emissivities.npy')
################################################################################
#### END OPTIONS
################################################################################
# Find nearest value in an array
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
def find_nearest_ind(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
# Convolution function for calibrating the IRS data
def convolve(filterwaves, filterresponse, datawaves, dataflux):
top = integrate.simps(
np.interp(filterwaves, datawaves, dataflux)*filterresponse,
filterwaves)
bottom = integrate.simps(filterresponse, filterwaves)
return top/bottom
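# Minimal usage sketch (illustrative only; the filter and spectrum are synthetic).
# convolve() returns filter-weighted synthetic photometry, so a flat spectrum seen
# through any filter comes back at the flat value.
def _convolve_demo():
    filt_waves = np.linspace(20.0, 25.0, 50)
    filt_resp = np.ones_like(filt_waves)
    spec_waves = np.linspace(5.0, 40.0, 500)
    spec_flux = np.full_like(spec_waves, 2.0)
    return convolve(filt_waves, filt_resp, spec_waves, spec_flux)  # ~= 2.0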
def stitch_trim_lr(sData, spitzFlags, spitzNames):
wa = 'wavelength'
fx = 'flux'
er = 'error'
sl2f, sl1f, ll2f, ll1f = spitzFlags
sl2, sl1, ll2, ll1 = spitzNames
# Before stitching, we trim spitzer data to <35 microns
if ll1f and 0:
index = np.where(sData[ll1][wa]<35.0)
sData[ll1][fx] = sData[ll1][fx][index]
sData[ll1][er] = sData[ll1][er][index]
sData[ll1][wa] = sData[ll1][wa][index]
k=3
lastvalue = -2
firstvalue = 1
if sl1f and sl2f:
sl1half = sData[sl1][wa].size/2
sl2half = sData[sl2][wa].size/2
SL1_fit = uni_spline(sData[sl1][wa][:sl1half],
sData[sl1][fx][:sl1half],k=k)
SL2_fit = uni_spline(sData[sl2][wa][sl2half:],
sData[sl2][fx][sl2half:],k=k)
SL2_SL1 = np.linspace(sData[sl2][wa][lastvalue],
sData[sl1][wa][firstvalue], 3)
SL2_N = np.nanmean(SL2_fit(SL2_SL1))
SL1_N = np.nanmean(SL1_fit(SL2_SL1))
SL1_norm = SL2_N / SL1_N
sData[sl1][fx] *= SL1_norm
if ll2f and sl1f:
ll2half = sData[ll2][wa].size/2
sl1half = sData[sl1][wa].size/2
LL2_fit = uni_spline(sData[ll2][wa][:ll2half],
sData[ll2][fx][:ll2half],k=k)
SL1_fit = uni_spline(sData[sl1][wa][sl1half:],
sData[sl1][fx][sl1half:],k=k)
SL1_LL2 = np.linspace(sData[sl1][wa][lastvalue],
sData[ll2][wa][firstvalue], 3)
LL2_N = np.nanmean(LL2_fit(SL1_LL2))
SL1_N = np.nanmean(SL1_fit(SL1_LL2))
LL2_norm = SL1_N / LL2_N
sData[ll2][fx] *= LL2_norm
if ll1f and ll2f:
ll1half = sData[ll1][wa].size/2
ll2half = sData[ll2][wa].size/2
LL2_fit = uni_spline(sData[ll2][wa][ll2half:],
sData[ll2][fx][ll2half:],k=k)
LL1_fit = uni_spline(sData[ll1][wa][:ll1half],
sData[ll1][fx][:ll1half],k=k)
LL2_LL1 = np.linspace(sData[ll2][wa][lastvalue],
sData[ll1][wa][firstvalue], 3)
LL2_N = np.nanmean(LL2_fit(LL2_LL1))
LL1_N = np.nanmean(LL1_fit(LL2_LL1))
LL1_norm = LL2_N / LL1_N
sData[ll1][fx] *= LL1_norm
return sData
def stitch_lr(sData, spitzFlags, spitzNames):
wa = 'wavelength'
fx = 'flux'
sl2f, sl1f, ll2f, ll1f = spitzFlags
sl2, sl1, ll2, ll1 = spitzNames
lastvalue = -5
firstvalue = 4
if sl1f and sl2f:
        sl1half = sData[sl1][wa].size//2
        sl2half = sData[sl2][wa].size//2
SL1_fit = uni_spline(sData[sl1][wa][:sl1half],
sData[sl1][fx][:sl1half])
SL2_fit = uni_spline(sData[sl2][wa][sl2half:],
sData[sl2][fx][sl2half:])
# SL1_fit = uni_spline(sData[sl1][wa],
# sData[sl1][fx])
# SL2_fit = uni_spline(sData[sl2][wa],
# sData[sl2][fx])
mid = np.nanmean([sData[sl1][wa][firstvalue], sData[sl2][wa][lastvalue]])
SL2_N = SL2_fit(mid)
SL1_N = SL1_fit(mid)
SL1_norm = SL2_N/SL1_N
sData[sl1][fx] *= SL1_norm
if ll2f and sl1f:
        ll2half = sData[ll2][wa].size//2
        sl1half = sData[sl1][wa].size//2
LL2_fit = uni_spline(sData[ll2][wa][:ll2half],
sData[ll2][fx][:ll2half])
SL1_fit = uni_spline(sData[sl1][wa][sl1half:],
sData[sl1][fx][sl1half:])
# LL2_fit = uni_spline(sData[ll2][wa],
# sData[ll2][fx])
# SL1_fit = uni_spline(sData[sl1][wa],
# sData[sl1][fx])
        mid = np.nanmean([sData[sl1][wa][lastvalue], sData[ll2][wa][firstvalue]])
LL2_N = LL2_fit(mid)
SL1_N = SL1_fit(mid)
LL2_norm = SL1_N/LL2_N
sData[ll2][fx] *= LL2_norm
if ll1f and ll2f:
        ll1half = sData[ll1][wa].size//2
        ll2half = sData[ll2][wa].size//2
LL2_fit = uni_spline(sData[ll2][wa][ll2half:],
sData[ll2][fx][ll2half:])
LL1_fit = uni_spline(sData[ll1][wa][:ll1half],
sData[ll1][fx][:ll1half])
# LL2_fit = uni_spline(sData[ll2][wa],
# sData[ll2][fx])
# LL1_fit = uni_spline(sData[ll1][wa],
# sData[ll1][fx])
mid = np.nanmean([sData[ll2][wa][firstvalue], sData[ll1][wa][lastvalue]])
LL2_N = LL2_fit(mid)
LL1_N = LL1_fit(mid)
LL1_norm = LL2_N/LL1_N
sData[ll1][fx] *= LL1_norm
return sData
def stitch_trim_rl(sData, spitzFlags, spitzNames):
wa = 'wavelength'
fx = 'flux'
er = 'error'
sl2f, sl1f, ll2f, ll1f = spitzFlags
sl2, sl1, ll2, ll1 = spitzNames
# Before stitching, we trim spitzer data to <35 microns
if ll1f:
index = np.where(sData[ll1][wa]<35.0)
sData[ll1][fx] = sData[ll1][fx][index]
sData[ll1][er] = sData[ll1][er][index]
sData[ll1][wa] = sData[ll1][wa][index]
lastvalue = -4
firstvalue = 3
if ll2f and ll1f:
        ll1half = sData[ll1][wa].size//2
        ll2half = sData[ll2][wa].size//2
LL2_fit = uni_spline(sData[ll2][wa][ll2half:],
sData[ll2][fx][ll2half:])
LL1_fit = uni_spline(sData[ll1][wa][:ll1half],
sData[ll1][fx][:ll1half])
LL2_LL1 = np.linspace(sData[ll2][wa][lastvalue],
sData[ll1][wa][firstvalue], 100)
LL2_N = np.nanmean(LL2_fit(LL2_LL1))
LL1_N = np.nanmean(LL1_fit(LL2_LL1))
LL2_norm = LL1_N/LL2_N
sData[ll2][fx] *= LL2_norm
if sl1f and ll2f:
        ll2half = sData[ll2][wa].size//2
        sl1half = sData[sl1][wa].size//2
LL2_fit = uni_spline(sData[ll2][wa][:ll2half],
sData[ll2][fx][:ll2half])
SL1_fit = uni_spline(sData[sl1][wa][sl1half:],
sData[sl1][fx][sl1half:])
SL1_LL2 = np.linspace(sData[sl1][wa][lastvalue],
sData[ll2][wa][firstvalue], 100)
LL2_N = np.nanmean(LL2_fit(SL1_LL2))
SL1_N = np.nanmean(SL1_fit(SL1_LL2))
SL1_norm = LL2_N/SL1_N
sData[sl1][fx] *= SL1_norm
if sl2f and sl1f:
        sl1half = sData[sl1][wa].size//2
        sl2half = sData[sl2][wa].size//2
SL1_fit = uni_spline(sData[sl1][wa][:sl1half],
sData[sl1][fx][:sl1half])
SL2_fit = uni_spline(sData[sl2][wa][sl2half:],
sData[sl2][fx][sl2half:])
SL2_SL1 = np.linspace(sData[sl2][wa][lastvalue],
sData[sl1][wa][firstvalue], 100)
SL2_N = np.nanmean(SL2_fit(SL2_SL1))
SL1_N = np.nanmean(SL1_fit(SL2_SL1))
SL2_norm = SL1_N/SL2_N
sData[sl2][fx] *= SL2_norm
return sData
def norm_blackbodies(sData, fitWaves, nWarm, nCold, t_1, t_2):
wa = 'wavelength'
fx = 'flux'
er = 'error'
w4 = 'WISE4'
mp24, mp70 = 'MIPS24', 'MIPS70'
h70, h100 = 'HerschelPACS70', 'HerschelPACS100'
h160 = 'HerschelPACS160'
ll1 = 'SpitzerIRS-LL1'
# Which values are present in the data?
mp24f, ll1f, w4f = nWarm
mp70f, h70f, h100f, h160f = nCold
# Initial guesses for the black bodies
# fitWaves = [23.68, 22.194]
# fitWaves += [i+1 for i in range(160)]
# fitWaves = sorted(fitWaves)
# bb1 = b_lam(fitWaves, t_1)
# bb2 = b_lam(fitWaves, t_2)
wav = np.logspace(-1, 2.3, 1000)
bb1 = b_lam(wav, t_1)
bb2 = b_lam(wav, t_2)
# Normalize warm dust
if mp24f:
n_1 = np.nanmean(sData[mp24][fx]) / bb1.max()
elif ll1f:
index2 = np.where(np.logical_and(sData[ll1][wa]>20, sData[ll1][wa]<25))
n_1 = np.nanmean(sData[ll1][fx][index2]) / bb1.max()
elif w4f:
n_1 = np.nanmean(sData[w4][fx]) / bb1.max()
else:
n_1 = 1
# Normalize cold dust
if mp70f:
n_2 = np.nanmean(sData[mp70][fx]) / bb2.max()
elif h70f:
n_2 = np.nanmean(sData[h70][fx]) / bb2.max()
elif h100f:
n_2 = np.nanmean(sData[h100][fx]) / bb2.max()
elif h160f:
n_2 = np.nanmean(sData[h160][fx]) / bb2.max()
else:
n_2 = n_1
# # Normalize warm dust
# if mp24f:
# # index1 = np.where(fitWaves==23.68)
# index1 = np.where(np.logical_and(wav>=23, wav<=24))
# n_1 = np.nanmean(sData[mp24][fx]) / np.nanmean(bb1[index1])
# elif ll1f:
# # index1 = np.where(np.logical_and(fitWaves>=20, fitWaves<=25))
# index1 = np.where(np.logical_and(wav>=20, wav<=25))
# index2 = np.where(np.logical_and(sData[ll1][wa]>20, sData[ll1][wa]<25))
# n_1 = np.nanmean(sData[ll1][fx][index2]) / np.nanmean(bb1[index1])
# elif w4f:
# # index1 = np.where(fitWaves==22.194)
# index1 = np.where(np.logical_and(wav>=21, wav<=23))
# n_1 = np.nanmean(sData[w4][fx]) / np.nanmean(bb1[index1])
# else:
# n_1 = 1
# # Normalize cold dust
# if mp70f:
# # index1 = np.where(fitWaves==71.42)
# index1 = np.where(np.logical_and(wav>=70.0, wav<=73.0))
# n_2 = np.nanmean(sData[mp70][fx]) / np.nanmean(bb2[index1])
# elif h70f:
# # index1 = np.where(fitWaves==70.0)
# index1 = np.where(np.logical_and(wav>=69.0, wav<=71.0))
# n_2 = np.nanmean(sData[h70][fx]) / np.nanmean(bb2[index1])
# elif h100f:
# # index1 = np.where(fitWaves==100.0)
# index1 = np.where(np.logical_and(wav>=98.0, wav<=102.0))
# n_2 = np.nanmean(sData[h100][fx]) / np.nanmean(bb2[index1])
# elif h160f:
# # index1 = np.where(fitWaves==160.0)
# index1 = np.where(np.logical_and(wav>=150.0, wav<=170.0))
# n_2 = np.nanmean(sData[h160][fx]) / np.nanmean(bb2[index1])
# else:
# n_2 = n_1
return n_1, n_2
# Blackbody radiation function, in terms of lambda. Returns FNu in janskies
def b_lam(waves, temp):
# Constants
H = constants.h
C = constants.c
K = constants.k
waves_m = waves/1e6
return (2*(C**2)*H)/((np.exp((H*C)/(waves_m*K*temp))-1.0)*(waves_m**5)) \
* (waves_m**2/C) * 1.0e19
# Blackbody radiation function, in terms of nu. Returns FNu in janskies
def b_nu(wavelengths, temperature):
H = constants.h
C = constants.c
K = constants.k
wavelengths_m = wavelengths/1.0e6
nu = C/wavelengths_m
top = 2 * H * (nu**3)
bottom = C**2 * (np.exp((H*nu)/(K*temperature)) - 1)
return top/bottom
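# Usage sketch (temperature value is illustrative only): evaluate both
# Planck forms on the module wavelength grid.
#   f_lam = b_lam(WAVES, 5800.0)
#   f_nu = b_nu(WAVES, 5800.0)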
def blowout_size(grainDensity, starL=1., starM=1., qRad=0.9):
if starL is np.nan:
starL = 1
if starM is np.nan:
starM = 1
G = constants.G
C = constants.c
# Calculate blowout grain size
grainDensity = grainDensity / 1000 / (0.01**3) # Density converted to SI
nume = 3 * starL * 3.826e26 * qRad
deno = 8 * np.pi * starM * 1.989e30 * G * C * grainDensity
return nume/deno * 1e6
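# Usage sketch: blowout grain size in microns for astronomical silicate
# (density 3.0 g/cm3, matching the `densities` dict in __main__ below)
# around a roughly solar-luminosity, solar-mass star.
#   a_blow = blowout_size(3.0, starL=1.0, starM=1.0)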
# Separates data according to instrument. Input: dict of IPAC Tables
def sort_by_instrument(data):
wa = 'wavelength'
fx = 'flux'
er = 'error'
unique_insts = list()
for i in range(data['instrument'].size):
if not data['instrument'][i] in unique_insts:
unique_insts.append(data['instrument'][i])
separatedStarData = dict()
for inst in unique_insts:
index = np.where(data['instrument']==inst)
separatedStarData[inst] = dict()
separatedStarData[inst][wa] = np.array(data[wa][index])
separatedStarData[inst][fx] = np.array(data[fx][index])
separatedStarData[inst][er] = np.array(data[er][index])
return unique_insts, separatedStarData
# Remove .DS_Store file in directory
def pull_file_names(dirPath):
filenames = os.listdir(dirPath)
if '.DS_Store' in filenames:
os.remove(dirPath+'.DS_Store')
ind = filenames.index('.DS_Store')
filenames.pop(ind)
return filenames
# Create 1000 radii arrays for given star temp. Used in realistic fitting.
# Takes dictionary of grain temps and list of grain comps.
def interpTemps(starTemp, oldGrainTemps, grainComps):
STAR_TEMPS = np.linspace(2000, 15000, 14)
DISK_RADII = np.logspace(-1, 3, 121)
radii = np.logspace(-1, 3, 1000)
GRAINSIZES = np.loadtxt(HOME_DIR+'Arrays/GrainSizes.dat')
for grainComp in grainComps:
abr = ''
for letter in grainComp:
if letter in UPPERS:
abr += letter
starIndices = np.where(np.logical_and(
STAR_TEMPS<starTemp+3000,
STAR_TEMPS>starTemp-3000
))
newStarTempGrainTemps = np.empty((
DISK_RADII.size,
GRAINSIZES.size
))
for r in range(DISK_RADII.size):
for g in range(GRAINSIZES.size):
newStarTempGrainTemps[r][g] = np.interp(
starTemp,
STAR_TEMPS[starIndices],
oldGrainTemps[grainComp][starIndices][:,r][:,g]
)
newGrainTemps = np.empty((radii.size,GRAINSIZES.size))
for r in range(radii.size):
for g in range(GRAINSIZES.size):
newGrainTemps[r][g] = np.interp(
radii[r],
DISK_RADII,
newStarTempGrainTemps[:,g]
)
np.save(HOME_DIR+'Arrays/InterpGrainTemps/'+'%.0fK_%s.npy'%
(starTemp,grainComp), newGrainTemps, allow_pickle=False)
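# Usage sketch (star temperature is illustrative): precompute the
# 1000-radius grain temperature grid once per star; the result is reloaded
# from Arrays/InterpGrainTemps/ as done in the __main__ block below.
#   interpTemps(5800.0, GRAIN_TEMPS_TOTAL, ['AstroSil'])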
class StarObject:
def __init__(self, starD, starL, grainComp, grainTemps, blowoutSize, emis, grains):
self.starD = starD
self.starL = starL
self.grainComp = grainComp
self.grainTemps = grainTemps
self.blowoutSize = blowoutSize
self.emis = emis
self.grains = grains
self.G = constants.G # Universal gravitation constant
self.C = constants.c # Speed of light in vacuum
self.S = constants.sigma # Stephan-Boltzman constant
self.K = constants.k # Boltzman constant
self.H = constants.h # Planck's constant
self.AS_DENSITY = 3.0 # g/cm3
self.H2O_DENSITY = 1.0 # g/cm3 LATER: use vlfr to calc grain density
self.AC_DENSITY = 2.095 # g/cm3
self.radii = np.logspace(-1, 3, 1000)
def calcFlux(self, waves, r0, T_0=1):
wavelengths_m = waves / 1.0e6
# Create radii/grains arrays
sigma = 0.10 # Use 0.10 for the deviation
r0 /= np.sqrt(self.starL)
rindex = np.where(np.logical_and(self.radii<1.4*r0,
self.radii>0.6*r0))[0]
radii = self.radii[rindex]
radii *= 1.4959787066e11
r0 *= 1.4959787066e11
grainTemps = self.grainTemps[rindex]
grains = self.grains/1.0e6
# Calculate CA
# T_0 = 1
blS = self.blowoutSize/1e6
q = -3.5
exponent = -0.5 * ((radii - r0) / (sigma*r0))**2
ca = T_0*np.exp(exponent)*np.abs(3+q) \
/ (np.pi*(np.power(blS,3+q)-np.power(.001,3+q)))
ca *= 1e6
# Integral loop
da = np.diff(grains)
da = np.append(da, da[-1])
dr = np.diff(radii)
dr = np.append(dr, dr[-1])
fw = np.empty(waves.size)
fr = np.empty(radii.size)
flux = np.empty(grains.size)
for w in range(waves.size):
for r in range(radii.size):
B_nu = b_nu(waves[w], grainTemps[r])
grainflux = (grains**2/(4*((self.starD*3.08568025e16)**2))) \
* self.emis[:,w] * B_nu * ca[r] * (grains**-3.5) * da \
* 2 * np.pi * radii[r] * dr[r]
fr[r] = grainflux.sum()
fw[w] = fr.sum()*1e26
return fw
if __name__ == '__main__':
before = time()
densities = {
'AstroSil': 3.0,
'DirtyIceAstroSil': 1.12
}
asdf = ascii.read(HOME_DIR+'stars final sorted/star_HD 32977.txt')
WAVES = np.array(sorted(asdf['wavelength']))
starD = asdf.meta['keywords']['DIST_pc']['value']
if starD is np.nan:
starD = 1
starT = asdf.meta['keywords']['TEMP']['value']
if starT is np.nan:
starT = 5800
WAVES = np.logspace(-1,3, 901)
starL = 1
r0 = 0.5
T_0 = 1
grainComp = 'AstroSil'
blowoutSize = blowout_size(densities[grainComp])
try:
grainTemps = np.load(HOME_DIR+'Arrays/InterpGrainTemps/'+'%.0fK_%s.npy'%(
starT, grainComp))
except:
from sed_config import GRAIN_TEMPS_TOTAL
interpTemps(starT, GRAIN_TEMPS_TOTAL, [grainComp])
del GRAIN_TEMPS_TOTAL
grainTemps = np.load(HOME_DIR+'Arrays/InterpGrainTemps/'+'%.0fK_%s.npy'%(
starT, grainComp))
graindex = np.where(GRAINSIZES>=blowoutSize)[0]
grains = GRAINSIZES[graindex]
grainTemps = grainTemps[:,graindex]
# Interp emissivities to wavelengths
emis = np.empty((grains.size, WAVES.size))
for g in range(grains.size):
emis[g] = np.interp(WAVES,WAVELENGTHS,EMISSIVITIES_TOTAL[grainComp][g])
    # StarObject(starD, starL, grainComp, grainTemps, blowoutSize, emis, grains)
    star = StarObject(starD, starL, grainComp, grainTemps, blowoutSize,
        emis, grains)
before = time()
flux1 = star.calcFlux(WAVES, r0, blowoutSize)
plt.plot(WAVES, flux1, label = 0.5)
    print(time() - before)
flux2 = star.calcFlux(WAVES, 1., blowoutSize)
plt.plot(WAVES, flux2, label = 1.)
flux3 = star.calcFlux(WAVES, 5., blowoutSize)
plt.plot(WAVES, flux3, label = 5.)
flux4 = star.calcFlux(WAVES, 50., blowoutSize)
plt.plot(WAVES, flux4, label = 50.)
flux5 = star.calcFlux(WAVES, 200., blowoutSize)
plt.plot(WAVES, flux5, label = 200.)
bb1 = b_lam(WAVES, 200)
index = np.where(np.logical_and(WAVES>=25., WAVES<=35.))
bb1 *= np.nanmean(flux1[index])/np.nanmean(bb1[index])
plt.plot(WAVES, bb1, '-.', label='BB')
plt.legend()
plt.ylim(1, 1e4)
# plt.xlim(2, 200)
# plt.xlim(0.1, 1000)
# plt.ylim(1e-8, 1e16)
plt.xlim(2, 1000)
plt.semilogx()
plt.semilogy()
plt.show()
| 34.669002 | 87 | 0.577793 |
d79afe41e717c3d0bdbf260100e55afe23054ca9
| 23,952 |
py
|
Python
|
src/lava/lib/dl/slayer/neuron/adrf.py
|
valmat07/lava-dl
|
bc91ffbd8caa946afe0db6753ede4daf2bdeda2c
|
[
"BSD-3-Clause"
] | null | null | null |
src/lava/lib/dl/slayer/neuron/adrf.py
|
valmat07/lava-dl
|
bc91ffbd8caa946afe0db6753ede4daf2bdeda2c
|
[
"BSD-3-Clause"
] | null | null | null |
src/lava/lib/dl/slayer/neuron/adrf.py
|
valmat07/lava-dl
|
bc91ffbd8caa946afe0db6753ede4daf2bdeda2c
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
"""Adaptive RF Izhikevich neuron."""
import numpy as np
import torch
from . import base
from .dynamics import resonator, adaptive_phase_th
from ..spike import complex
from ..utils import quantize
# These are tuned heuristically so that scale_grad=1 and tau_grad=1 serve as
# a good starting point
# SCALE_RHO_MULT = 10
# TAU_RHO_MULT = 0.2
# SCALE_RHO_MULT = 10
# TAU_RHO_MULT = 0.5
SCALE_RHO_MULT = 0.1
TAU_RHO_MULT = 100
class Neuron(base.Neuron):
"""This is the implementation of RF neuron.
.. math::
\\mathfrak{Re}(z[t]) &= (1-\\alpha)(\\cos\\phi\\ \\mathfrak{Re}(z[t-1])
- \\sin\\phi\\ \\mathfrak{Im}(z[t-1]))
+ \\mathfrak{Re}(x[t]) + \\text{real bias}\\
\\mathfrak{Im}(z[t]) &= (1-\\alpha)(\\sin\\phi\\ \\mathfrak{Re}(z[t-1])
+ \\cos\\phi\\ \\mathfrak{Im}(z[t-1]))
+ \\mathfrak{Im}(x[t]) + \\text{imag bias} \\
\\vartheta[t] &= (1-\\alpha_{\\vartheta})\\,
(\\vartheta[t-1] - \\vartheta_0) + \\vartheta_0 \\
r[t] &= (1-\\alpha_r)\\,r[t-1] \\
s[t] &= |z[t]| \\geq (\\vartheta[t] + r[t]) \\text{ and } \\arg(z[t])=0
The internal state representations are scaled down compared to
the actual hardware implementation. This allows for a natural range of
synaptic weight values as well as the gradient parameters.
The neuron parameters like threshold, decays are represented as real
values. They internally get converted to fixed precision representation of
the hardware. It also provides properties to access the neuron
parameters in fixed precision states. The parameters are internally clamped
to the valid range.
Parameters
----------
threshold : float
neuron threshold.
threshold_step : float
the increase in threshold after spike.
period : float or tuple
period of the neuron. If ``shared_param`` is False, then it can be
specified as a tuple (min_period, max_period).
decay : float or tuple
decay factor of the neuron. If ``shared_param`` is False, then it can
be specified as a tuple (min_decay, max_decay).
threshold_decay : float or tuple
the fraction of threshold decay per time step. If ``shared_param`` is
        False, then it can be specified as a tuple (min_decay, max_decay).
refractory_decay : float or tuple
the fraction of refractory decay per time step. If ``shared_param`` is
        False, then it can be specified as a tuple (min_decay, max_decay).
tau_grad : float, optional
time constant of spike function derivative. Defaults to 1.
scale_grad : float, optional
scale of spike function derivative. Defaults to 1.
scale : int, optional
scale of the internal state. ``scale=1`` will result in values in the
        range expected of Loihi hardware. Defaults to 1 << 6.
norm : fx-ptr or lambda, optional
normalization function on the dendrite output. None means no
normalization. Defaults to None.
dropout : fx-ptr or lambda, optional
        neuron dropout method. None means no dropout. Defaults to None.
shared_param : bool, optional
flag to enable/disable shared parameter neuron group. If it is
False, individual parameters are assigned on a per-channel basis.
Defaults to True.
persistent_state : bool, optional
flag to enable/disable persistent state between iterations.
Defaults to False.
requires_grad : bool, optional
flag to enable/disable learning on neuron parameter. Defaults to False.
graded_spike : bool, optional
flag to enable/disable graded spike output. Defaults to False.
log_init : bool, optional
        if True, initializes the natural frequency in a log-spaced range.
Default is True.
"""
def __init__(
self, threshold, threshold_step,
period, decay, threshold_decay, refractory_decay,
tau_grad=1, scale_grad=1, scale=1 << 6,
norm=None, dropout=None,
shared_param=True, persistent_state=False, requires_grad=False,
graded_spike=False,
log_init=True,
):
super(Neuron, self).__init__(
threshold=threshold,
tau_grad=tau_grad,
scale_grad=scale_grad,
p_scale=1 << 12,
w_scale=scale,
s_scale=scale * (1 << 6),
norm=norm,
dropout=dropout,
persistent_state=persistent_state,
shared_param=shared_param,
requires_grad=requires_grad,
complex=True
)
self.threshold_step = int(threshold_step * self.w_scale) / self.w_scale
self.graded_spike = graded_spike
self.log_init = log_init
# sin_decay and cos_decay are restricted to be inside the unit circle
# in first quadrant. This means that the oscillation step can only
# between 0 and 90 degrees, i.e. period >= 4
# It makes sense to have period >= 4 because we would not get proper
# oscillation at all in a discrete in time system for period < 4.
# It is possible to set period < 4, but it will get quantized.
# So the result might not be as desired for period = 2 and 3.
if self.shared_param is True:
if np.isscalar(decay) is False:
raise AssertionError(
f'Expected decay to be a scalar when shared_param is True.'
f' Found {decay=}.'
)
if np.isscalar(period) is False:
raise AssertionError(
f'Expected period to be a scalar when shared_param is'
f' True. Found {period=}.'
)
if period < 4:
raise AssertionError(
f'Neuron period less than 4 does not make sense. '
f'Found {period=}.'
)
sin_decay = np.sin(2 * np.pi / period) * (1 - decay)
cos_decay = np.cos(2 * np.pi / period) * (1 - decay)
self.register_parameter(
'sin_decay',
torch.nn.Parameter(
torch.FloatTensor([self.p_scale * sin_decay]),
requires_grad=self.requires_grad,
)
)
self.register_parameter(
'cos_decay',
torch.nn.Parameter(
torch.FloatTensor([self.p_scale * cos_decay]),
requires_grad=self.requires_grad,
)
)
self.register_parameter(
'threshold_decay',
torch.nn.Parameter(
torch.FloatTensor([self.p_scale * threshold_decay]),
requires_grad=self.requires_grad,
)
)
self.register_parameter(
'refractory_decay',
torch.nn.Parameter(
torch.FloatTensor([self.p_scale * refractory_decay]),
requires_grad=self.requires_grad,
)
)
else:
if np.isscalar(period) is True: # 1% jitter for now
if period < 4:
raise AssertionError(
f'Neuron period less than 4 does not make sense. '
f'Found {period=}.'
)
self.period_min = period * 0.99
self.period_max = period * 1.01
else:
if len(period) != 2:
raise AssertionError(
f'Expected period to be of length 2 i.e. [min, max]. '
f'Found {period=}.'
)
if min(period) < 4:
raise AssertionError(
f'Neuron period less than 4 does not make sense. '
f'Found {period=}.'
)
self.period_min = period[0]
self.period_max = period[1]
if np.isscalar(decay) is True:
self.decay_min = decay * 0.99
self.decay_max = decay * 1.01
else:
if len(decay) != 2:
raise AssertionError(
f'Expected decay to be of length 2 i.e. [min, max]. '
f'Found {decay=}.'
)
self.decay_min = decay[0]
self.decay_max = decay[1]
if np.isscalar(threshold_decay) is True:
self.threshold_decay_min = threshold_decay
self.threshold_decay_max = threshold_decay
else:
if len(threshold_decay) != 2:
raise AssertionError(
f'Expected threshold decay to be of length 2 i.e. '
f'[min, max]. Found {threshold_decay=}.'
)
self.threshold_decay_min = threshold_decay[0]
self.threshold_decay_max = threshold_decay[1]
if np.isscalar(refractory_decay) is True:
self.refractory_decay_min = refractory_decay
self.refractory_decay_max = refractory_decay
else:
if len(refractory_decay) != 2:
raise AssertionError(
f'Expected refractory decay to be of length 2 i.e. '
f'[min, max]. Found {refractory_decay=}.'
)
self.refractory_decay_min = refractory_decay[0]
self.refractory_decay_max = refractory_decay[1]
self.register_parameter(
'sin_decay',
torch.nn.Parameter(
torch.FloatTensor([0]),
requires_grad=self.requires_grad,
)
)
self.register_parameter(
'cos_decay',
torch.nn.Parameter(
torch.FloatTensor([0]),
requires_grad=self.requires_grad,
)
)
self.register_parameter(
'threshold_decay',
torch.nn.Parameter(
torch.FloatTensor([
self.p_scale * self.threshold_decay_min
]), requires_grad=self.requires_grad,
)
)
self.register_parameter(
'refractory_decay',
torch.nn.Parameter(
torch.FloatTensor([
self.p_scale * self.refractory_decay_min
]), requires_grad=self.requires_grad,
)
)
self.register_buffer(
'real_state',
torch.zeros(1, dtype=torch.float),
persistent=False
)
self.register_buffer(
'imag_state',
torch.zeros(1, dtype=torch.float),
persistent=False
)
self.register_buffer(
'threshold_state',
torch.tensor([self.threshold], dtype=torch.float),
persistent=False
)
self.register_buffer(
'refractory_state',
torch.zeros(1, dtype=torch.float),
persistent=False
)
self.clamp()
def clamp(self):
"""A function to clamp the sin decay and cosine decay parameters to be
within valid range. The user will generally not need to call this
function.
"""
# the dynamics parameters must be positive within the allowable range
# they must be inside the unit circle
with torch.no_grad():
gain = torch.sqrt(self.sin_decay**2 + self.cos_decay**2)
clamped_gain = gain.clamp(0, self.p_scale - 1)
self.sin_decay.data *= clamped_gain / gain
self.cos_decay.data *= clamped_gain / gain
# this should not be clamped to 0 because
# 0 would mean no oscillation
self.sin_decay.data.clamp_(1)
self.cos_decay.data.clamp_(0)
@property
def decay(self):
"""The decay parameter of the neuron."""
self.clamp()
return 1 - np.sqrt(
(quantize(self.sin_decay).cpu().data.numpy() / self.p_scale)**2
+ (quantize(self.cos_decay).cpu().data.numpy() / self.p_scale)**2
)
@property
def lam(self):
"""The lambda parameter of the neuron."""
return -np.log(1 - self.decay)
@property
def period(self):
"""The period of the neuron oscillation."""
return 1 / self.frequency
@property
def frequency(self):
"""The frequency of neuron oscillation."""
self.clamp()
return np.arctan2(
quantize(self.sin_decay).cpu().data.numpy(),
quantize(self.cos_decay).cpu().data.numpy()
) / 2 / np.pi
@property
def device(self):
"""The device memory (cpu/cuda) where the object lives."""
return self.sin_decay.device
@property
def cx_sin_decay(self):
"""The compartment sin decay parameter to be used for configuration."""
self.clamp()
val = quantize(self.sin_decay).cpu().data.numpy().astype(int)
if len(val) == 1:
return val[0]
return val
@property
def cx_cos_decay(self):
"""The compartment cos decay parameter to be used for configuration."""
self.clamp()
val = quantize(self.cos_decay).cpu().data.numpy().astype(int)
if len(val) == 1:
return val[0]
return val
@property
def cx_threshold_decay(self):
"""The compartment threshold decay parameter to be used for configuring
Loihi hardware."""
self.clamp()
val = quantize(self.threshold_decay).cpu().data.numpy().astype(int)
if len(val) == 1:
return val[0]
return val
@property
def cx_refractory_decay(self):
"""The compartment refractory decay parameter to be used for
configuring Loihi hardware."""
self.clamp()
val = quantize(self.refractory_decay).cpu().data.numpy().astype(int)
if len(val) == 1:
return val[0]
return val
@property
def scale(self):
"""Scale difference between slayer representation and hardware
representation of the variable states."""
return self.w_scale
def dynamics(self, input):
"""Computes the dynamics (without spiking behavior) of the neuron
instance to a complex input tuple. The input shape must match with the
neuron shape. For the first time, the neuron shape is determined from
the input automatically. It is essentially a resonator dynamics with
adaptive threshold and refractory response.
Parameters
----------
input : tuple of torch tensors
Complex input tuple of tensor, i.e. (real_input, imag_input).
Returns
-------
torch tensor
real response of the neuron.
torch tensor
imaginary response of the neuron.
torch tensor
adaptive threshold of the neuron.
torch tensor
            refractory response of the neuron.
"""
real_input, imag_input = input
if self.shape is None:
self.shape = real_input.shape[1:-1]
if len(self.shape) == 0:
raise AssertionError(
f"Expected input to have at least 3 dimensions: "
f"[Batch, Spatial dims ..., Time]. "
f"It's shape is {real_input.shape}."
)
self.num_neurons = np.prod(self.shape)
if self.shared_param is False:
if self.log_init is False:
frequency = (
(1 / self.period_max)
+ (1 / self.period_min - 1 / self.period_max)
* torch.rand(
self.shape[0], dtype=torch.float
).to(self.device)
)
else:
frequency = torch.logspace(
-np.log10(self.period_max),
-np.log10(self.period_min),
steps=self.shape[0]
).to(self.device)
decay = self.decay_min \
+ (self.decay_max - self.decay_min) * torch.rand(
self.shape[0], dtype=torch.float
).to(self.device)
sin_decay = torch.sin(2 * np.pi * frequency) * (1 - decay)
cos_decay = torch.cos(2 * np.pi * frequency) * (1 - decay)
threshold_decay = self.threshold_decay_min \
+ (self.threshold_decay_max - self.threshold_decay_min)\
* torch.rand(
self.shape[0], dtype=torch.float
).to(self.device)
refractory_decay = self.refractory_decay_min \
+ (self.refractory_decay_max - self.refractory_decay_min)\
* torch.rand(
self.shape[0], dtype=torch.float
).to(self.device)
self.sin_decay.data = self.p_scale * sin_decay
self.cos_decay.data = self.p_scale * cos_decay
self.threshold_decay.data = self.p_scale * threshold_decay
self.refractory_decay.data = self.p_scale * refractory_decay
del self.period_min
del self.period_max
del self.decay_min
del self.decay_max
del self.threshold_decay_min
del self.threshold_decay_max
del self.refractory_decay_min
del self.refractory_decay_max
else:
if real_input.shape[1:-1] != self.shape:
raise AssertionError(
f'Real input tensor shape ({real_input.shape}) '
f'does not match with Neuron shape ({self.shape}).'
)
if real_input.shape != imag_input.shape:
raise AssertionError(
                f'Real input tensor shape ({real_input.shape}) does not match '
f'with imaginary input shape ({imag_input.shape}).'
)
dtype = self.real_state.dtype
device = self.real_state.device
if self.real_state.shape[0] != real_input.shape[0]:
# persistent state cannot proceed due to change in batch dimension.
# this likely indicates change from training to testing set
self.real_state = torch.zeros(
real_input.shape[:-1]
).to(dtype).to(device)
self.imag_state = torch.zeros(
real_input.shape[:-1]
).to(dtype).to(device)
self.threshold_state = self.threshold * torch.ones(
real_input.shape[:-1]
).to(dtype).to(device)
self.refractory_state = torch.zeros(
real_input.shape[:-1]
).to(dtype).to(device)
if self.real_state.shape[1:] != self.shape:
# this means real_state and imag_state are not initialized properly
self.real_state = self.real_state * torch.ones(
real_input.shape[:-1]
).to(dtype).to(device)
self.imag_state = self.imag_state * torch.ones(
real_input.shape[:-1]
).to(dtype).to(device)
self.threshold_state = self.threshold_state * torch.ones(
real_input.shape[:-1]
).to(dtype).to(device)
self.refractory_state = self.refractory_state * torch.ones(
real_input.shape[:-1]
).to(dtype).to(device)
# clamp the values only when learning is enabled
# This means we don't need to clamp the values after gradient update.
# It is done in runtime now. Might be slow, but overhead is negligible.
if self.requires_grad is True:
self.clamp()
if self.real_norm is not None:
real_input = self.real_norm(real_input)
if self.imag_norm is not None:
imag_input = self.imag_norm(imag_input)
real, imag = resonator.dynamics(
real_input, imag_input,
quantize(self.sin_decay),
quantize(self.cos_decay),
self.real_state, self.imag_state,
self.s_scale,
debug=self.debug
)
threshold, refractory = adaptive_phase_th.dynamics(
real, imag,
self.imag_state,
self.refractory_state,
quantize(self.refractory_decay),
self.threshold_state,
quantize(self.threshold_decay),
self.threshold_step,
self.threshold,
self.s_scale,
debug=self.debug
)
if self.persistent_state is True:
with torch.no_grad():
self.real_state = real[..., -1].clone()
# self.imag_state = imag[..., -1].clone()
# this should be done post spike
self.threshold_state = threshold[..., -1].clone()
self.refractory_state = refractory[..., -1].clone()
return real, imag, threshold, refractory
def spike(self, real, imag, threshold, refractory):
"""Extracts spike points from the real and imaginary states.
Parameters
----------
real : torch tensor
real dynamics of the neuron.
imag : torch tensor
imaginary dynamics of the neuron.
threshold :torch tensor
threshold dynamics of the neuron.
refractory :torch tensor
refractory dynamics of the neuron.
Returns
-------
torch tensor
spike output
"""
# print(
# real[..., -1].item() * self.s_scale,
# imag[..., -1].item() * self.s_scale,
# self.imag_state.item() * self.s_scale
# )
spike = complex.Spike.apply(
real, imag, threshold + refractory,
self.tau_rho * TAU_RHO_MULT,
self.scale_rho * SCALE_RHO_MULT,
self.graded_spike,
self.imag_state,
# self.s_scale,
1,
)
if self.persistent_state is True:
with torch.no_grad():
# self.real_state *= (1 - spike[..., -1]).detach().clone()
self.imag_state = imag[..., -1].clone()
self.refractory_state = adaptive_phase_th.persistent_ref_state(
self.refractory_state, spike[..., -1],
self.threshold_state
).detach().clone()
self.threshold_state = adaptive_phase_th.persistent_th_state(
self.threshold_state, spike[..., -1],
self.threshold_step
).detach().clone()
if self.drop is not None:
spike = self.drop(spike)
return spike
def forward(self, input):
"""omputes the full response of the neuron instance to a complex
input tuple. The input shape must match with the neuron shape. For the
first time, the neuron shape is determined from the input
automatically.
Parameters
----------
input :
Complex input tuple of tensor, i.e. (real_input, imag_input).
Returns
-------
torch tensor
spike response of the neuron.
"""
real, imag, threshold, refractory = self.dynamics(input)
        return self.spike(real, imag, threshold, refractory)
| 38.261981 | 79 | 0.545633 |
2b612f600c443bc49f88406adfda0bb11e44a1f7
| 14,711 |
py
|
Python
|
aries_cloudagent/wallet/routes.py
|
ldej/aries-cloudagent-python
|
25b7a9c08921e67b0962c434102489884ac403b2
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/wallet/routes.py
|
ldej/aries-cloudagent-python
|
25b7a9c08921e67b0962c434102489884ac403b2
|
[
"Apache-2.0"
] | 1 |
2020-03-06T12:11:29.000Z
|
2020-03-06T12:11:29.000Z
|
aries_cloudagent/wallet/routes.py
|
ldej/aries-cloudagent-python
|
25b7a9c08921e67b0962c434102489884ac403b2
|
[
"Apache-2.0"
] | 1 |
2020-04-30T08:22:22.000Z
|
2020-04-30T08:22:22.000Z
|
"""Wallet admin routes."""
from aiohttp import web
from aiohttp_apispec import (
docs,
# match_info_schema,
querystring_schema,
request_schema,
response_schema,
)
from marshmallow import fields
from ..ledger.base import BaseLedger
from ..ledger.endpoint_type import EndpointType
from ..ledger.error import LedgerConfigError, LedgerError
from ..messaging.models.openapi import OpenAPISchema
from ..messaging.valid import (
DID_POSTURE,
ENDPOINT,
ENDPOINT_TYPE,
INDY_CRED_DEF_ID,
INDY_DID,
INDY_RAW_PUBLIC_KEY,
)
from .base import DIDInfo, BaseWallet
from .did_posture import DIDPosture
from .error import WalletError, WalletNotFoundError
class DIDSchema(OpenAPISchema):
"""Result schema for a DID."""
did = fields.Str(description="DID of interest", **INDY_DID)
verkey = fields.Str(description="Public verification key", **INDY_RAW_PUBLIC_KEY)
posture = fields.Str(
description=(
"Whether DID is current public DID, "
"posted to ledger but not current public DID, "
"or local to the wallet"
),
**DID_POSTURE,
)
class DIDResultSchema(OpenAPISchema):
"""Result schema for a DID."""
result = fields.Nested(DIDSchema())
class DIDListSchema(OpenAPISchema):
"""Result schema for connection list."""
results = fields.List(fields.Nested(DIDSchema()), description="DID list")
class DIDEndpointWithTypeSchema(OpenAPISchema):
"""Request schema to set DID endpoint of particular type."""
did = fields.Str(description="DID of interest", required=True, **INDY_DID)
endpoint = fields.Str(
description="Endpoint to set (omit to delete)", required=False, **ENDPOINT
)
endpoint_type = fields.Str(
description=(
f"Endpoint type to set (default '{EndpointType.ENDPOINT.w3c}'); "
"affects only public or posted DIDs"
),
required=False,
**ENDPOINT_TYPE,
)
class DIDEndpointSchema(OpenAPISchema):
"""Request schema to set DID endpoint; response schema to get DID endpoint."""
did = fields.Str(description="DID of interest", required=True, **INDY_DID)
endpoint = fields.Str(
description="Endpoint to set (omit to delete)", required=False, **ENDPOINT
)
class DIDListQueryStringSchema(OpenAPISchema):
"""Parameters and validators for DID list request query string."""
did = fields.Str(description="DID of interest", required=False, **INDY_DID)
verkey = fields.Str(
description="Verification key of interest",
required=False,
**INDY_RAW_PUBLIC_KEY,
)
posture = fields.Str(
description=(
"Whether DID is current public DID, "
"posted to ledger but current public DID, "
"or local to the wallet"
),
required=False,
**DID_POSTURE,
)
class DIDQueryStringSchema(OpenAPISchema):
"""Parameters and validators for set public DID request query string."""
did = fields.Str(description="DID of interest", required=True, **INDY_DID)
class CredDefIdMatchInfoSchema(OpenAPISchema):
"""Path parameters and validators for request taking credential definition id."""
cred_def_id = fields.Str(
description="Credential identifier", required=True, **INDY_CRED_DEF_ID
)
def format_did_info(info: DIDInfo):
"""Serialize a DIDInfo object."""
if info:
return {
"did": info.did,
"verkey": info.verkey,
"posture": DIDPosture.get(info.metadata).moniker,
}
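# Example of the serialized shape (all values are illustrative placeholders):
#   {"did": "WgWxqztrNooG92RXvxSTWv",
#    "verkey": "H3C2AVvLMv6gmMNam3uVAjZpfkcJCwDwnZn6z3wXmqPV",
#    "posture": "wallet_only"}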
@docs(tags=["wallet"], summary="List wallet DIDs")
@querystring_schema(DIDListQueryStringSchema())
@response_schema(DIDListSchema, 200)
async def wallet_did_list(request: web.BaseRequest):
"""
Request handler for searching wallet DIDs.
Args:
request: aiohttp request object
Returns:
The DID list response
"""
context = request.app["request_context"]
wallet: BaseWallet = await context.inject(BaseWallet, required=False)
if not wallet:
raise web.HTTPForbidden(reason="No wallet available")
filter_did = request.query.get("did")
filter_verkey = request.query.get("verkey")
filter_posture = DIDPosture.get(request.query.get("posture"))
results = []
public_did_info = await wallet.get_public_did()
posted_did_infos = await wallet.get_posted_dids()
if filter_posture is DIDPosture.PUBLIC:
if (
public_did_info
and (not filter_verkey or public_did_info.verkey == filter_verkey)
and (not filter_did or public_did_info.did == filter_did)
):
results.append(format_did_info(public_did_info))
elif filter_posture is DIDPosture.POSTED:
results = []
for info in posted_did_infos:
if (not filter_verkey or info.verkey == filter_verkey) and (
not filter_did or info.did == filter_did
):
results.append(format_did_info(info))
elif filter_did:
try:
info = await wallet.get_local_did(filter_did)
except WalletError:
# badly formatted DID or record not found
info = None
if (
info
and (not filter_verkey or info.verkey == filter_verkey)
and (
filter_posture is None
or (
filter_posture is DIDPosture.WALLET_ONLY
and not info.metadata.get("posted")
)
)
):
results.append(format_did_info(info))
elif filter_verkey:
try:
info = await wallet.get_local_did_for_verkey(filter_verkey)
except WalletError:
info = None
if info and (
filter_posture is None
or (
                filter_posture is DIDPosture.WALLET_ONLY
and not info.metadata.get("posted")
)
):
results.append(format_did_info(info))
else:
dids = await wallet.get_local_dids()
results = [
format_did_info(info)
for info in dids
if filter_posture is None
or DIDPosture.get(info.metadata) is DIDPosture.WALLET_ONLY
]
results.sort(
key=lambda info: (DIDPosture.get(info["posture"]).ordinal, info["did"])
)
return web.json_response({"results": results})
@docs(tags=["wallet"], summary="Create a local DID")
@response_schema(DIDResultSchema, 200)
async def wallet_create_did(request: web.BaseRequest):
"""
Request handler for creating a new local DID in the wallet.
Args:
request: aiohttp request object
Returns:
The DID info
"""
context = request.app["request_context"]
wallet: BaseWallet = await context.inject(BaseWallet, required=False)
if not wallet:
raise web.HTTPForbidden(reason="No wallet available")
try:
info = await wallet.create_local_did()
except WalletError as err:
raise web.HTTPBadRequest(reason=err.roll_up) from err
return web.json_response({"result": format_did_info(info)})
@docs(tags=["wallet"], summary="Fetch the current public DID")
@response_schema(DIDResultSchema, 200)
async def wallet_get_public_did(request: web.BaseRequest):
"""
Request handler for fetching the current public DID.
Args:
request: aiohttp request object
Returns:
The DID info
"""
context = request.app["request_context"]
wallet: BaseWallet = await context.inject(BaseWallet, required=False)
if not wallet:
raise web.HTTPForbidden(reason="No wallet available")
try:
info = await wallet.get_public_did()
except WalletError as err:
raise web.HTTPBadRequest(reason=err.roll_up) from err
return web.json_response({"result": format_did_info(info)})
@docs(tags=["wallet"], summary="Assign the current public DID")
@querystring_schema(DIDQueryStringSchema())
@response_schema(DIDResultSchema, 200)
async def wallet_set_public_did(request: web.BaseRequest):
"""
Request handler for setting the current public DID.
Args:
request: aiohttp request object
Returns:
The updated DID info
"""
context = request.app["request_context"]
wallet: BaseWallet = await context.inject(BaseWallet, required=False)
if not wallet:
raise web.HTTPForbidden(reason="No wallet available")
did = request.query.get("did")
if not did:
raise web.HTTPBadRequest(reason="Request query must include DID")
try:
ledger = await context.inject(BaseLedger, required=False)
if not ledger:
reason = f"No ledger available"
if not context.settings.get_value("wallet.type"):
reason += ": missing wallet-type?"
raise web.HTTPForbidden(reason=reason)
async with ledger:
if not await ledger.get_key_for_did(did):
raise web.HTTPNotFound(reason=f"DID {did} is not posted to the ledger")
did_info = await wallet.get_local_did(did)
info = await wallet.set_public_did(did)
if info:
# Publish endpoint if necessary
endpoint = did_info.metadata.get(
"endpoint", context.settings.get("default_endpoint")
)
async with ledger:
await ledger.update_endpoint_for_did(info.did, endpoint)
except WalletNotFoundError as err:
raise web.HTTPNotFound(reason=err.roll_up) from err
except (LedgerError, WalletError) as err:
raise web.HTTPBadRequest(reason=err.roll_up) from err
return web.json_response({"result": format_did_info(info)})
@docs(
tags=["wallet"], summary="Update endpoint in wallet and on ledger if posted to it"
)
@request_schema(DIDEndpointWithTypeSchema)
async def wallet_set_did_endpoint(request: web.BaseRequest):
"""
Request handler for setting an endpoint for a DID.
Args:
request: aiohttp request object
"""
context = request.app["request_context"]
wallet: BaseWallet = await context.inject(BaseWallet, required=False)
if not wallet:
raise web.HTTPForbidden(reason="No wallet available")
body = await request.json()
did = body["did"]
endpoint = body.get("endpoint")
endpoint_type = EndpointType.get(
body.get("endpoint_type", EndpointType.ENDPOINT.w3c)
)
try:
ledger: BaseLedger = await context.inject(BaseLedger, required=False)
await wallet.set_did_endpoint(did, endpoint, ledger, endpoint_type)
except WalletNotFoundError as err:
raise web.HTTPNotFound(reason=err.roll_up) from err
except LedgerConfigError as err:
raise web.HTTPForbidden(reason=err.roll_up) from err
except (LedgerError, WalletError) as err:
raise web.HTTPBadRequest(reason=err.roll_up) from err
return web.json_response({})
@docs(tags=["wallet"], summary="Query DID endpoint in wallet")
@querystring_schema(DIDQueryStringSchema())
@response_schema(DIDEndpointSchema, 200)
async def wallet_get_did_endpoint(request: web.BaseRequest):
"""
Request handler for getting the current DID endpoint from the wallet.
Args:
request: aiohttp request object
Returns:
The updated DID info
"""
context = request.app["request_context"]
wallet: BaseWallet = await context.inject(BaseWallet, required=False)
if not wallet:
raise web.HTTPForbidden(reason="No wallet available")
did = request.query.get("did")
if not did:
raise web.HTTPBadRequest(reason="Request query must include DID")
try:
did_info = await wallet.get_local_did(did)
endpoint = did_info.metadata.get("endpoint")
except WalletNotFoundError as err:
raise web.HTTPNotFound(reason=err.roll_up) from err
except WalletError as err:
raise web.HTTPBadRequest(reason=err.roll_up) from err
return web.json_response({"did": did, "endpoint": endpoint})
@docs(tags=["wallet"], summary="Rotate keypair for a DID not posted to the ledger")
@querystring_schema(DIDQueryStringSchema())
async def wallet_rotate_did_keypair(request: web.BaseRequest):
"""
Request handler for rotating local DID keypair.
Args:
request: aiohttp request object
Returns:
An empty JSON response
"""
context = request.app["request_context"]
wallet: BaseWallet = await context.inject(BaseWallet, required=False)
if not wallet:
raise web.HTTPForbidden(reason="No wallet available")
did = request.query.get("did")
if not did:
raise web.HTTPBadRequest(reason="Request query must include DID")
try:
did_info = await wallet.get_local_did(did)
if did_info.metadata.get("posted", False):
# call from ledger API instead to propagate through ledger NYM transaction
raise web.HTTPBadRequest(reason=f"DID {did} is posted to the ledger")
await wallet.rotate_did_keypair_start(did) # do not take seed over the wire
await wallet.rotate_did_keypair_apply(did)
except WalletNotFoundError as err:
raise web.HTTPNotFound(reason=err.roll_up) from err
except WalletError as err:
raise web.HTTPBadRequest(reason=err.roll_up) from err
return web.json_response({})
async def register(app: web.Application):
"""Register routes."""
app.add_routes(
[
web.get("/wallet/did", wallet_did_list, allow_head=False),
web.post("/wallet/did/create", wallet_create_did),
web.get("/wallet/did/public", wallet_get_public_did, allow_head=False),
web.post("/wallet/did/public", wallet_set_public_did),
web.post("/wallet/set-did-endpoint", wallet_set_did_endpoint),
web.get(
"/wallet/get-did-endpoint", wallet_get_did_endpoint, allow_head=False
),
web.patch("/wallet/did/local/rotate-keypair", wallet_rotate_did_keypair),
]
)
def post_process_routes(app: web.Application):
"""Amend swagger API."""
# Add top-level tags description
if "tags" not in app._state["swagger_dict"]:
app._state["swagger_dict"]["tags"] = []
app._state["swagger_dict"]["tags"].append(
{
"name": "wallet",
"description": "DID and tag policy management",
"externalDocs": {
"description": "Design",
"url": (
"https://github.com/hyperledger/indy-sdk/tree/"
"master/docs/design/003-wallet-storage"
),
},
}
)
| 32.331868 | 87 | 0.653864 |
18fa58dd610ccbb4d20566e11d309b317b29cb93
| 3,692 |
py
|
Python
|
configs/top_down/resnet/jhmdb/res50_jhmdb_sub1_256x256.py
|
irvingzhang0512/mmpose
|
17557522ce3e41f830973079c5b4321935c41439
|
[
"Apache-2.0"
] | null | null | null |
configs/top_down/resnet/jhmdb/res50_jhmdb_sub1_256x256.py
|
irvingzhang0512/mmpose
|
17557522ce3e41f830973079c5b4321935c41439
|
[
"Apache-2.0"
] | null | null | null |
configs/top_down/resnet/jhmdb/res50_jhmdb_sub1_256x256.py
|
irvingzhang0512/mmpose
|
17557522ce3e41f830973079c5b4321935c41439
|
[
"Apache-2.0"
] | null | null | null |
log_level = 'INFO'
load_from = 'https://download.openmmlab.com/mmpose/top_down/resnet/res50_mpii_256x256-418ffc88_20200812.pth' # noqa: E501
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1, metric=['PCK', 'tPCK'], key_indicator='Mean PCK')
optimizer = dict(
type='Adam',
lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[8, 15])
total_epochs = 20
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=15,
dataset_joints=15,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])
# model settings
model = dict(
type='TopDown',
pretrained=None,
backbone=dict(type='ResNet', depth=50),
keypoint_head=dict(
type='TopDownSimpleHead',
in_channels=2048,
out_channels=channel_cfg['num_output_channels'],
),
train_cfg=dict(),
test_cfg=dict(
flip_test=True,
post_process='default',
shift_heatmap=True,
modulate_kernel=11),
loss_pose=dict(type='JointsMSELoss', use_target_weight=True))
data_cfg = dict(
image_size=[256, 256],
heatmap_size=[64, 64],
num_output_channels=channel_cfg['num_output_channels'],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
soft_nms=False,
nms_thr=1.0,
oks_thr=0.9,
vis_thr=0.2,
use_gt_bbox=True,
det_bbox_thr=0.0,
bbox_file='',
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownRandomFlip', flip_prob=0.5),
dict(
type='TopDownGetRandomScaleRotation', rot_factor=30,
scale_factor=0.25),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(type='TopDownGenerateTarget', sigma=2),
dict(
type='Collect',
keys=['img', 'target', 'target_weight'],
meta_keys=[
'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
'rotation', 'bbox', 'flip_pairs'
]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='TopDownAffine'),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='Collect',
keys=[
'img',
],
meta_keys=[
'image_file', 'center', 'scale', 'rotation', 'bbox', 'flip_pairs'
]),
]
test_pipeline = val_pipeline
data_root = 'data/jhmdb'
data = dict(
samples_per_gpu=64,
workers_per_gpu=2,
train=dict(
type='TopDownJhmdbDataset',
ann_file=f'{data_root}/annotations/Sub1_train.json',
img_prefix=f'{data_root}/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='TopDownJhmdbDataset',
ann_file=f'{data_root}/annotations/Sub1_test.json',
img_prefix=f'{data_root}/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='TopDownJhmdbDataset',
ann_file=f'{data_root}/annotations/Sub1_test.json',
img_prefix=f'{data_root}/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
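# Usage sketch (entry-point path is an assumption about the surrounding
# MMPose checkout):
#   python tools/train.py configs/top_down/resnet/jhmdb/res50_jhmdb_sub1_256x256.py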
| 27.147059 | 122 | 0.618364 |
795e9e4f2493c0c3db5247271e0247273b09c0e8
| 428 |
py
|
Python
|
tests/integrationTests/tests/json/__init__.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integrationTests/tests/json/__init__.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integrationTests/tests/json/__init__.py
|
hifiadi/Submitty
|
62a8239313cff7e3f841ff66aeda6b0557e9c15b
|
[
"BSD-3-Clause"
] | null | null | null |
# Necessary imports. Provides library functions to ease writing tests.
from lib import testcase
@testcase
def correct_json_output(test):
test.run_validator() # Runs validator.out with default arguments
# Check differences on output files. Files within the data directory are compared with
# their counterparts in the validation directory.
test.json_diff("test02_0_diff.json")
test.json_diff("results.json")
| 38.909091 | 90 | 0.775701 |
9772dc66e1e665bee12a41cd09ece76252d51502
| 511 |
py
|
Python
|
pycritty/commands/__init__.py
|
binRick/pycritty
|
ae27e61fe597c22e6830d62533e11d64bf06a3ae
|
[
"MIT"
] | null | null | null |
pycritty/commands/__init__.py
|
binRick/pycritty
|
ae27e61fe597c22e6830d62533e11d64bf06a3ae
|
[
"MIT"
] | null | null | null |
pycritty/commands/__init__.py
|
binRick/pycritty
|
ae27e61fe597c22e6830d62533e11d64bf06a3ae
|
[
"MIT"
] | null | null | null |
from .pycritty import Pycritty
from .ls import ListResource
from .save import SaveConfig
from .profile import LoadProfile
from .create import CreateBinary
from .run import RunConfig
from .ssh import SSH
from .load import LoadConfig
from .install import Install
from .rm import Remove
subcommands = {
'ls': ListResource,
'save': SaveConfig,
'run': RunConfig,
'ssh': SSH,
'binary': CreateBinary,
'load': LoadConfig,
'install': Install,
'rm': Remove,
'profile': LoadProfile,
}
| 22.217391 | 32 | 0.710372 |
694e5d10fde04e63c1766cf32d4e51d618737be8
| 25,614 |
py
|
Python
|
airflow/providers/google/cloud/operators/compute.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 3 |
2021-01-29T20:33:56.000Z
|
2021-08-06T17:35:16.000Z
|
airflow/providers/google/cloud/operators/compute.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 210 |
2021-07-17T00:25:52.000Z
|
2021-12-29T00:44:48.000Z
|
airflow/providers/google/cloud/operators/compute.py
|
ChaseKnowlden/airflow
|
6b71eac1997a7c0db3b8e3aed6b4e65d01871440
|
[
"Apache-2.0"
] | 3 |
2020-06-30T02:38:17.000Z
|
2022-01-19T06:14:08.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Compute Engine operators."""
from copy import deepcopy
from typing import Any, Dict, List, Optional, Sequence, Union
from googleapiclient.errors import HttpError
from json_merge_patch import merge
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.compute import ComputeEngineHook
from airflow.providers.google.cloud.utils.field_sanitizer import GcpBodyFieldSanitizer
from airflow.providers.google.cloud.utils.field_validator import GcpBodyFieldValidator
class ComputeEngineBaseOperator(BaseOperator):
"""Abstract base operator for Google Compute Engine operators to inherit from."""
def __init__(
self,
*,
zone: str,
resource_id: str,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.project_id = project_id
self.zone = zone
self.resource_id = resource_id
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.impersonation_chain = impersonation_chain
self._validate_inputs()
super().__init__(**kwargs)
def _validate_inputs(self) -> None:
if self.project_id == '':
raise AirflowException("The required parameter 'project_id' is missing")
if not self.zone:
raise AirflowException("The required parameter 'zone' is missing")
if not self.resource_id:
raise AirflowException("The required parameter 'resource_id' is missing")
def execute(self, context):
pass
class ComputeEngineStartInstanceOperator(ComputeEngineBaseOperator):
"""
Starts an instance in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineStartInstanceOperator`
:param zone: Google Cloud zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:type project_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
# [START gce_instance_start_template_fields]
template_fields = (
'project_id',
'zone',
'resource_id',
'gcp_conn_id',
'api_version',
'impersonation_chain',
)
# [END gce_instance_start_template_fields]
def execute(self, context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
return hook.start_instance(zone=self.zone, resource_id=self.resource_id, project_id=self.project_id)
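# Usage sketch inside a DAG (task id, project, zone and instance name are
# placeholders):
#   start_instance = ComputeEngineStartInstanceOperator(
#       task_id='gce_instance_start',
#       project_id='my-project',
#       zone='europe-west1-b',
#       resource_id='my-instance',
#   )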
class ComputeEngineStopInstanceOperator(ComputeEngineBaseOperator):
"""
Stops an instance in Google Compute Engine.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineStopInstanceOperator`
:param zone: Google Cloud zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:type project_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
# [START gce_instance_stop_template_fields]
template_fields = (
'project_id',
'zone',
'resource_id',
'gcp_conn_id',
'api_version',
'impersonation_chain',
)
# [END gce_instance_stop_template_fields]
def execute(self, context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
hook.stop_instance(zone=self.zone, resource_id=self.resource_id, project_id=self.project_id)
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION = [
dict(name="machineType", regexp="^.+$"),
]
class ComputeEngineSetMachineTypeOperator(ComputeEngineBaseOperator):
"""
Changes the machine type for a stopped instance to the machine type specified in
the request.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineSetMachineTypeOperator`
:param zone: Google Cloud zone where the instance exists.
:type zone: str
:param resource_id: Name of the Compute Engine instance resource.
:type resource_id: str
:param body: Body required by the Compute Engine setMachineType API, as described in
https://cloud.google.com/compute/docs/reference/rest/v1/instances/setMachineType#request-body
:type body: dict
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:type project_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:type api_version: str
:param validate_body: Optional, If set to False, body validation is not performed.
        Defaults to True.
:type validate_body: bool
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
# [START gce_instance_set_machine_type_template_fields]
template_fields = (
'project_id',
'zone',
'resource_id',
'body',
'gcp_conn_id',
'api_version',
'impersonation_chain',
)
# [END gce_instance_set_machine_type_template_fields]
def __init__(
self,
*,
zone: str,
resource_id: str,
body: dict,
project_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
validate_body: bool = True,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.body = body
self._field_validator = None # type: Optional[GcpBodyFieldValidator]
if validate_body:
self._field_validator = GcpBodyFieldValidator(
SET_MACHINE_TYPE_VALIDATION_SPECIFICATION, api_version=api_version
)
super().__init__(
project_id=project_id,
zone=zone,
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body)
def execute(self, context) -> None:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
return hook.set_machine_type(
zone=self.zone, resource_id=self.resource_id, body=self.body, project_id=self.project_id
)
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION = [
dict(name="name", regexp="^.+$"),
dict(name="description", optional=True),
dict(
name="properties",
type='dict',
optional=True,
fields=[
dict(name="description", optional=True),
dict(name="tags", optional=True, fields=[dict(name="items", optional=True)]),
dict(name="machineType", optional=True),
dict(name="canIpForward", optional=True),
dict(name="networkInterfaces", optional=True), # not validating deeper
dict(name="disks", optional=True), # not validating the array deeper
dict(
name="metadata",
optional=True,
fields=[
dict(name="fingerprint", optional=True),
dict(name="items", optional=True),
dict(name="kind", optional=True),
],
),
dict(name="serviceAccounts", optional=True), # not validating deeper
dict(
name="scheduling",
optional=True,
fields=[
dict(name="onHostMaintenance", optional=True),
dict(name="automaticRestart", optional=True),
dict(name="preemptible", optional=True),
dict(name="nodeAffinities", optional=True), # not validating deeper
],
),
dict(name="labels", optional=True),
dict(name="guestAccelerators", optional=True), # not validating deeper
dict(name="minCpuPlatform", optional=True),
],
),
] # type: List[Dict[str, Any]]
GCE_INSTANCE_TEMPLATE_FIELDS_TO_SANITIZE = [
"kind",
"id",
"name",
"creationTimestamp",
"properties.disks.sha256",
"properties.disks.kind",
"properties.disks.sourceImageEncryptionKey.sha256",
"properties.disks.index",
"properties.disks.licenses",
"properties.networkInterfaces.kind",
"properties.networkInterfaces.accessConfigs.kind",
"properties.networkInterfaces.name",
"properties.metadata.kind",
"selfLink",
]
class ComputeEngineCopyInstanceTemplateOperator(ComputeEngineBaseOperator):
"""
Copies the instance template, applying specified changes.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineCopyInstanceTemplateOperator`
:param resource_id: Name of the Instance Template
:type resource_id: str
:param body_patch: Patch to the body of instanceTemplates object following rfc7386
PATCH semantics. The body_patch content follows
https://cloud.google.com/compute/docs/reference/rest/v1/instanceTemplates
Name field is required as we need to rename the template,
all the other fields are optional. It is important to follow PATCH semantics
- arrays are replaced fully, so if you need to update an array you should
provide the whole target array as patch element.
:type body_patch: dict
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:type project_id: str
:param request_id: Optional, unique request_id that you might add to achieve
full idempotence (for example when client call times out repeating the request
with the same request id will not create a new instance template again).
It should be in UUID format as defined in RFC 4122.
:type request_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
:param api_version: Optional, API version used (for example v1 - or beta). Defaults
to v1.
:type api_version: str
:param validate_body: Optional, If set to False, body validation is not performed.
        Defaults to True.
:type validate_body: bool
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
# [START gce_instance_template_copy_operator_template_fields]
template_fields = (
'project_id',
'resource_id',
'request_id',
'gcp_conn_id',
'api_version',
'impersonation_chain',
)
# [END gce_instance_template_copy_operator_template_fields]
def __init__(
self,
*,
resource_id: str,
body_patch: dict,
project_id: Optional[str] = None,
request_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version: str = 'v1',
validate_body: bool = True,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.body_patch = body_patch
self.request_id = request_id
self._field_validator = None # Optional[GcpBodyFieldValidator]
if 'name' not in self.body_patch:
raise AirflowException(
"The body '{}' should contain at least "
"name for the new operator in the 'name' field".format(body_patch)
)
if validate_body:
self._field_validator = GcpBodyFieldValidator(
GCE_INSTANCE_TEMPLATE_VALIDATION_PATCH_SPECIFICATION, api_version=api_version
)
self._field_sanitizer = GcpBodyFieldSanitizer(GCE_INSTANCE_TEMPLATE_FIELDS_TO_SANITIZE)
super().__init__(
project_id=project_id,
zone='global',
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _validate_all_body_fields(self) -> None:
if self._field_validator:
self._field_validator.validate(self.body_patch)
def execute(self, context) -> dict:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self._validate_all_body_fields()
try:
# Idempotence check (sort of) - we want to check if the new template
# is already created and if is, then we assume it was created by previous run
# of CopyTemplate operator - we do not check if content of the template
# is as expected. Templates are immutable so we cannot update it anyway
# and deleting/recreating is not worth the hassle especially
# that we cannot delete template if it is already used in some Instance
# Group Manager. We assume success if the template is simply present
existing_template = hook.get_instance_template(
resource_id=self.body_patch['name'], project_id=self.project_id
)
self.log.info(
"The %s template already existed. It was likely created by previous run of the operator. "
"Assuming success.",
existing_template,
)
return existing_template
except HttpError as e:
# We actually expect to get 404 / Not Found here as the template should
# not yet exist
if not e.resp.status == 404:
raise e
old_body = hook.get_instance_template(resource_id=self.resource_id, project_id=self.project_id)
new_body = deepcopy(old_body)
self._field_sanitizer.sanitize(new_body)
new_body = merge(new_body, self.body_patch)
self.log.info("Calling insert instance template with updated body: %s", new_body)
hook.insert_instance_template(body=new_body, request_id=self.request_id, project_id=self.project_id)
return hook.get_instance_template(resource_id=self.body_patch['name'], project_id=self.project_id)
class ComputeEngineInstanceGroupUpdateManagerTemplateOperator(ComputeEngineBaseOperator):
"""
Patches the Instance Group Manager, replacing source template URL with the
destination one. API V1 does not have update/patch operations for Instance
Group Manager, so you must use beta or newer API version. Beta is the default.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ComputeEngineInstanceGroupUpdateManagerTemplateOperator`
:param resource_id: Name of the Instance Group Manager
:type resource_id: str
:param zone: Google Cloud zone where the Instance Group Manager exists.
:type zone: str
:param source_template: URL of the template to replace.
:type source_template: str
:param destination_template: URL of the target template.
:type destination_template: str
:param project_id: Optional, Google Cloud Project ID where the Compute
Engine Instance exists. If set to None or missing, the default project_id from the Google Cloud
connection is used.
:type project_id: str
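    :param update_policy: Optional, the update policy of the Instance Group Manager.
        If provided, it is passed through as the updatePolicy field of the patch
        body sent to the API.
    :type update_policy: dict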
:param request_id: Optional, unique request_id that you might add to achieve
full idempotence (for example when client call times out repeating the request
with the same request id will not create a new instance template again).
It should be in UUID format as defined in RFC 4122.
:type request_id: str
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:type gcp_conn_id: str
    :param api_version: Optional, API version used (must be beta or newer, for example beta).
        Defaults to beta, since API v1 does not support update/patch operations for
        Instance Group Managers.
:type api_version: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
# [START gce_igm_update_template_operator_template_fields]
template_fields = (
'project_id',
'resource_id',
'zone',
'request_id',
'source_template',
'destination_template',
'gcp_conn_id',
'api_version',
'impersonation_chain',
)
# [END gce_igm_update_template_operator_template_fields]
def __init__(
self,
*,
resource_id: str,
zone: str,
source_template: str,
destination_template: str,
project_id: Optional[str] = None,
update_policy: Optional[Dict[str, Any]] = None,
request_id: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
api_version='beta',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
self.zone = zone
self.source_template = source_template
self.destination_template = destination_template
self.request_id = request_id
self.update_policy = update_policy
self._change_performed = False
if api_version == 'v1':
raise AirflowException(
"Api version v1 does not have update/patch "
"operations for Instance Group Managers. Use beta"
" api version or above"
)
super().__init__(
project_id=project_id,
zone=self.zone,
resource_id=resource_id,
gcp_conn_id=gcp_conn_id,
api_version=api_version,
impersonation_chain=impersonation_chain,
**kwargs,
)
def _possibly_replace_template(self, dictionary: dict) -> None:
if dictionary.get('instanceTemplate') == self.source_template:
dictionary['instanceTemplate'] = self.destination_template
self._change_performed = True
def execute(self, context) -> Optional[bool]:
hook = ComputeEngineHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
old_instance_group_manager = hook.get_instance_group_manager(
zone=self.zone, resource_id=self.resource_id, project_id=self.project_id
)
patch_body = {}
if 'versions' in old_instance_group_manager:
patch_body['versions'] = old_instance_group_manager['versions']
if 'instanceTemplate' in old_instance_group_manager:
patch_body['instanceTemplate'] = old_instance_group_manager['instanceTemplate']
if self.update_policy:
patch_body['updatePolicy'] = self.update_policy
self._possibly_replace_template(patch_body)
if 'versions' in patch_body:
for version in patch_body['versions']:
self._possibly_replace_template(version)
if self._change_performed or self.update_policy:
self.log.info("Calling patch instance template with updated body: %s", patch_body)
return hook.patch_instance_group_manager(
zone=self.zone,
resource_id=self.resource_id,
body=patch_body,
request_id=self.request_id,
project_id=self.project_id,
)
else:
# Idempotence achieved
return True
| 42.477612 | 108 | 0.671274 |
6bfa7f2aaf82556910573e9d2d8ec775743def5f
| 634 |
py
|
Python
|
examples/open-floating-on-ws.py
|
Hi-Angel/i3ipc-python
|
93a8f0c39e5082751824b40b51680bcbc16bfce2
|
[
"BSD-3-Clause"
] | null | null | null |
examples/open-floating-on-ws.py
|
Hi-Angel/i3ipc-python
|
93a8f0c39e5082751824b40b51680bcbc16bfce2
|
[
"BSD-3-Clause"
] | null | null | null |
examples/open-floating-on-ws.py
|
Hi-Angel/i3ipc-python
|
93a8f0c39e5082751824b40b51680bcbc16bfce2
|
[
"BSD-3-Clause"
] | 1 |
2020-04-24T04:23:12.000Z
|
2020-04-24T04:23:12.000Z
|
#!/usr/bin/env python3
# This example shows how to make any window that opens on a workspace floating
# All workspaces that start with a string in this list will have their windows
# open floating
FLOATING_WORKSPACES = [ '3' ]
def is_ws_floating(name):
for floating_ws in FLOATING_WORKSPACES:
if name.startswith(floating_ws):
return True
return False
import i3ipc
i3 = i3ipc.Connection()
def on_window_open(i3, e):
ws = i3.get_tree().find_focused().workspace()
if is_ws_floating(ws.props.name):
e.container.command('floating toggle')
i3.on('window::new', on_window_open)
i3.main()
| 22.642857 | 78 | 0.709779 |
afe5a0a4d26a3cc5584f0a93bcfb36e2e990cdec
| 999 |
py
|
Python
|
tests/test_logger.py
|
huykingsofm/hks_pylib
|
d73a896a395df301ef8082a358ec8e23f7bc708a
|
[
"MIT"
] | 2 |
2021-04-06T07:01:27.000Z
|
2021-07-30T11:08:59.000Z
|
tests/test_logger.py
|
huykingsofm/hks_pylib
|
d73a896a395df301ef8082a358ec8e23f7bc708a
|
[
"MIT"
] | null | null | null |
tests/test_logger.py
|
huykingsofm/hks_pylib
|
d73a896a395df301ef8082a358ec8e23f7bc708a
|
[
"MIT"
] | null | null | null |
import threading
from hks_pylib.logger.logger_generator import InvisibleLoggerGenerator, StandardLoggerGenerator
from hks_pylib.logger.logger import Display, console_output
from hks_pylib.logger.standard import StdLevels, StdUsers
from hks_pylib.logger import acprint
def test_logger():
invisible_logger_generator = InvisibleLoggerGenerator()
standard_logger_generator = StandardLoggerGenerator("tests/test_logger.log")
log = invisible_logger_generator.generate(
"Name", {StdUsers.USER: [StdLevels.INFO], StdUsers.DEV: Display.ALL})
log(StdUsers.USER, StdLevels.INFO, "hks_pylib")
log(StdUsers.DEV, StdLevels.DEBUG, "huykingsofm")
log = standard_logger_generator.generate(
"Name", {StdUsers.USER: [StdLevels.INFO], StdUsers.DEV: Display.ALL})
log(StdUsers.USER, StdLevels.INFO, "hks_pylib")
log(StdUsers.DEV, StdLevels.DEBUG, "huykingsofm")
acprint("huythongminh", end="\n")
if __name__ == "__main__":
test_logger()
| 41.625 | 96 | 0.745746 |
176b55eae40006394d2987e0a8a8c45b8ca74988
| 2,522 |
py
|
Python
|
src/clustaar/webhook/middlewares/validate_signature_middleware.py
|
Clustaar/clustaar.webhook
|
81756ea7168e3281e17844829e6d98d89d372e54
|
[
"MIT"
] | 4 |
2018-06-08T13:20:33.000Z
|
2022-01-03T17:00:13.000Z
|
src/clustaar/webhook/middlewares/validate_signature_middleware.py
|
Clustaar/clustaar.webhook
|
81756ea7168e3281e17844829e6d98d89d372e54
|
[
"MIT"
] | 1 |
2020-08-18T16:10:04.000Z
|
2020-08-18T16:10:04.000Z
|
src/clustaar/webhook/middlewares/validate_signature_middleware.py
|
Clustaar/clustaar.webhook
|
81756ea7168e3281e17844829e6d98d89d372e54
|
[
"MIT"
] | 2 |
2018-06-12T06:06:05.000Z
|
2021-03-04T16:55:37.000Z
|
import falcon
import hashlib
import hmac
SIGNATURE_HEADER = "X-Hub-Signature"
class ValidateSignatureMiddleware(object):
"""Middleware validating the request signature to
prevent request forgery.
If signature is invalid a HTTP 400 status code is returned.
"""
def __init__(self, private_key):
"""
Args:
private_key (str): private used for validating payload signature
"""
self._private_key = private_key.encode()
self._hash_functions = {"sha1": hashlib.sha1}
def process_resource(self, req, resp, resource, params):
"""Falcon callback.
Raise error if signature is invalid.
Args:
req: the HTTP request
resp: the HTTP response
"""
signature = req.get_header(SIGNATURE_HEADER)
if signature:
hash_and_value = signature.split("=")
if len(hash_and_value) != 2:
self._raise("The request's signature format is invalid.")
hash_function, value = hash_and_value
self._validate_signature(req, value, hash_function)
else:
self._raise("The request's signature is missing.")
def _validate_signature(self, req, signature, hash_function_name):
"""Validates that signature is correct.
If signature is invalid it raises an HTTPInvalidHeader error.
Args:
req: the HTTP request
signature (str): signature to validate
hash_function_name (str): hash algorithm used to hash payload
"""
hash_function = self._hash_functions.get(hash_function_name)
if not hash_function:
self._raise(
"The request's signature hash function is invalid "
"(should be one of %s)." % list(self._hash_functions.keys())
)
signed_headers = ("date",)
buffer = "\n".join(
["%s=%s" % (header, req.get_header(header)) for header in signed_headers]
)
buffer += "\n" + req.body.decode("utf-8")
expected_signature = hmac.new(self._private_key, buffer.encode(), hash_function).hexdigest()
if expected_signature != signature:
self._raise("The request's signature is invalid.")
def _raise(self, description):
"""Raises a HTTPInvalidHeader exception with descrition.
Args:
descrition (str): error descrition
"""
raise falcon.HTTPInvalidHeader(description, SIGNATURE_HEADER)
| 34.081081 | 100 | 0.619746 |
77358bad2ba634d05fa0cd9a53c42dff3de53077
| 1,216 |
py
|
Python
|
tests/test_merge_v4.py
|
mrtolkien/riotwatcher_dto
|
777455c45159f177d3a7ba3956043ff26f625c30
|
[
"MIT"
] | 8 |
2020-10-01T18:01:15.000Z
|
2022-02-22T22:51:30.000Z
|
tests/test_merge_v4.py
|
mrtolkien/riotwatcher_dto
|
777455c45159f177d3a7ba3956043ff26f625c30
|
[
"MIT"
] | 2 |
2021-12-15T23:02:02.000Z
|
2022-01-25T06:07:53.000Z
|
tests/test_merge_v4.py
|
mrtolkien/riotwatcher_dto
|
777455c45159f177d3a7ba3956043ff26f625c30
|
[
"MIT"
] | 4 |
2020-07-30T23:24:06.000Z
|
2022-02-21T19:26:01.000Z
|
import os
import lol_dto
import roleml
from riot_transmute import match_to_game, match_timeline_to_game
from riot_transmute.merge_match_and_timeline import (
merge_games_from_riot_match_and_timeline,
)
def test_full(match_v4_dto, timeline_game_id_platform_id):
timeline, game_id, platform_id = timeline_game_id_platform_id
match_dto, timeline = roleml.fix_game(
match_v4_dto,
timeline,
True,
)
game_match = match_to_game(match_dto)
game_timeline = match_timeline_to_game(timeline, game_id, platform_id)
game_full = merge_games_from_riot_match_and_timeline(game_match, game_timeline)
lol_dto.utilities.dump_json(game_full, os.path.join("examples", "game_merged.json"))
assert game_full.sources.riotLolApi.gameId == game_id
assert game_full.teams.BLUE.players[0].snapshots.__len__() > 0
assert game_full.duration
assert game_full.patch
return game_full
def test_custom_game(custom_match_and_timeline):
match, timeline = custom_match_and_timeline
game = match_to_game(match)
game_timeline = match_timeline_to_game(timeline, 4676184349, "EUW1")
assert merge_games_from_riot_match_and_timeline(game, game_timeline)
| 28.27907 | 88 | 0.780428 |
73d3c78fc5fe15c84e1042aa75a2c23381f1743f
| 381 |
py
|
Python
|
Prime Genterator.py
|
cjw0621/Growing-Python
|
50d754ab0a9d32f2284b3732df046f1ce849eca4
|
[
"Unlicense"
] | 1 |
2021-11-21T01:51:30.000Z
|
2021-11-21T01:51:30.000Z
|
Prime Genterator.py
|
cjw0621/Growing-Python
|
50d754ab0a9d32f2284b3732df046f1ce849eca4
|
[
"Unlicense"
] | null | null | null |
Prime Genterator.py
|
cjw0621/Growing-Python
|
50d754ab0a9d32f2284b3732df046f1ce849eca4
|
[
"Unlicense"
] | null | null | null |
def isPrime(x):
if x < 2:
return False
elif x == 2:
return True
for n in range(2, x):
        if x % n == 0:
            return False
return True
def primeGenerator(a, b):
#your code goes here
for i in range(a, b):
if isPrime(i):
yield i
f = int(input())
t = int(input())
print(list(primeGenerator(f, t)))
| 19.05 | 33 | 0.480315 |
92252568c48360eeea114393f2168a63e0c79d6d
| 1,280 |
py
|
Python
|
python/tHome/sma/test/dcPower.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 18 |
2016-04-17T19:39:28.000Z
|
2020-11-19T06:55:20.000Z
|
python/tHome/sma/test/dcPower.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 6 |
2016-10-31T13:53:45.000Z
|
2019-03-20T20:47:03.000Z
|
python/tHome/sma/test/dcPower.py
|
ZigmundRat/T-Home
|
5dc8689f52d87dac890051e540b338b009293ced
|
[
"BSD-2-Clause"
] | 12 |
2016-10-31T12:29:08.000Z
|
2021-12-28T12:18:28.000Z
|
import unittest
from FakeSocket import FakeSocket
import tHome as T
#===========================================================================
#===========================================================================
class TestDcPower ( T.util.test.Case ) :
def test_dcPower( self ):
reply = """
53 4D 41 00 00 04 02 A0 00 00
00 01 00 5E 00 10 60 65 17 90
7D 00 AB 94 40 3B 00 A0 F7 00
E0 27 06 72 00 00 00 00 00 00
0E 80 01 02 80 53 00 00 00 00
01 00 00 00 01 1E 25 40 85 22
AF 53 13 08 00 00 13 08 00 00
13 08 00 00 13 08 00 00 01 00
00 00 02 1E 25 40 85 22 AF 53
21 08 00 00 21 08 00 00 21 08
00 00 21 08 00 00 01 00 00 00
00 00 00 00
"""
l = T.sma.Link( "fake", connect=False )
try:
l.socket = FakeSocket( T.util.hex.toBytes( reply ) )
o1 = l.dcPower()
l.decode = False
buf, decoder = l.dcPower()
o2 = decoder( buf )
finally:
l.socket = None
right = T.util.Data(
dcPower1 = 2067.0,
dcPower2 = 2081.0,
)
print o1
for k in right.keys():
r = right[k]
self.eq( getattr( o1, k ), r, k )
self.eq( getattr( o2, k ), r, k )
#===========================================================================
| 26.666667 | 76 | 0.452344 |
9b855f9637487340a666d4bfa656574793bcc8c9
| 5,906 |
py
|
Python
|
bahai_writings/documentengine.py
|
jlbradley1844/python-stuff
|
8cc65115600f15f11577b3392161b18aedafc663
|
[
"Apache-2.0"
] | null | null | null |
bahai_writings/documentengine.py
|
jlbradley1844/python-stuff
|
8cc65115600f15f11577b3392161b18aedafc663
|
[
"Apache-2.0"
] | null | null | null |
bahai_writings/documentengine.py
|
jlbradley1844/python-stuff
|
8cc65115600f15f11577b3392161b18aedafc663
|
[
"Apache-2.0"
] | null | null | null |
from docmetadata import DOCUMENT_INDEX, CATEGORIES
from documentcollection import DocumentCollection
from docutility import DocUtility
class DocumentEngine(object):
"""
The "engine" in the search framework. This is a management object
that is used to dispatch searches and retrieve document information
from those searches.
"""
def __init__(self, nlp_engine = None):
"""
sets up the document management objects.
:param nlp_engine: if passed in, passes this instance of the NLP document
engine to doc_collection upon creation. This is used so that multiple
DocumentCollection objects can share the same large NLP object.
"""
self.doc_collection = DocumentCollection(DOCUMENT_INDEX, nlp_engine)
self.session_track = {}
def get_categories(self):
"""
Returns the category metadata array
"""
return CATEGORIES
def get_document_tags(self, category=None):
"""
        Returns an array of tags, each paired with the category it is
        associated with.
        :param category: optional. If supplied, returns only those tags that match the category
"""
if category is None:
tag_coll = [{
"tag": key,
"category": DOCUMENT_INDEX[key]["category"]
} for key in DOCUMENT_INDEX]
else:
tag_coll = [{
"tag": key,
"category": category
} for key in DOCUMENT_INDEX
if DOCUMENT_INDEX[key]["category"] == category]
return tag_coll
def get_metadata(self, tag):
"""
        Returns the document metadata associated with a tag. Returns the
        hardcoded lookup values from DOCUMENT_INDEX.
        """
        return DOCUMENT_INDEX[tag]
def simple_search(self, tokens, document_tags=None, category=None):
"""
Returns a document and a reference inside the document with
those matches.
        :param tokens: array of tokens to match. N.B. the initial implementation
        matches just ONE SINGLE WORD. ADDITIONAL TOKENS WILL BE IGNORED.
        :param document_tags: optional. If specified, indicates the set of
documents the search is applied to. If not specified, matches all.
:param category: optional. If specified, indicates the category of
documents the search is applied to. Only used if documenttags=None
:returns array of json-style objects; each object consisting of
"tag" - which document it was found in
"selection" - sentence within which the token(s) found
"scope" - the scope of selection, which is always "sent"
"section" - section of document
"para" - paragraph number
"""
match_token = tokens[0]
if category is not None and document_tags is None:
document_tags = [key for key in DOCUMENT_INDEX
if DOCUMENT_INDEX[key]["category"] == category]
raw_results = self.doc_collection.simple_search(match_token, document_tags)
results = []
# return array of text references, consisting of tag, sentence and scope
for doc_tag in raw_results:
doc = self.doc_collection.extract_doc(doc_tag)["nlpdoc"]
index = self.doc_collection.extract_doc(doc_tag)["index"]
for ref in raw_results[doc_tag]:
found_ref = {}
found_ref["tag"] = doc_tag
begin = ref[2]
end = ref[3]
found_ref["selection"] = str(doc[begin:end].sent)
found_ref["scope"] = "sent"
lookup_info = index.lookup(begin)
found_ref["section"] = lookup_info["section"]
found_ref["para"] = lookup_info["paragraph"]
results.append(found_ref)
return results
def expand_selection(self, tag, token_ref, current_scope="sent", is_override=False):
"""
Given a document tag and a reference into it, get the text associated
with the next-bigger scope and returns the next bigger scope.
- If the next-bigger scope is identical, increase the current_scope until
you get something different
- If the value to be returned is greater than MAX_SELECTION_SIZE tokens
and is_override is false, then instead of returning something, an error
object will be returned indicating that the expand cannot be made because
it is too big. (In practice, this is used to prompt the user whether they
really want that much document text.)
:param tag - document tag
:param token_ref - numerical index into document, by token #
:param current_scope - SCOPE value indicating scope of text selection that has already
been returned to the client.
:param is_override - if false, will return error if section is above
          MAX_SELECTION_SIZE. If true, will ignore warning and return as much text
as is requested.
:returns - dictionary object indicating the following
"tag" - tag of document
"token_ref" - token_ref passed in
"selection" - text of new selection
"scope" - scope of new selection passed back
"""
txt = self.doc_collection.extract_doc(tag)["raw"]
index = self.doc_collection.extract_doc(tag)["index"]
doc_selector = DocUtility(index, txt)
span = self.doc_collection.extract_doc(tag)["nlpdoc"][token_ref:token_ref+1]
(text, scope) = doc_selector.get_next_scoped_selection(span,
current_scope, is_override)
return { "tag": tag,
"token_ref": token_ref,
"selection": text,
"scope": scope
}
| 41.886525 | 94 | 0.620894 |
b8be835c63b2fbd63c488bfb431e557843d49d69
| 321 |
py
|
Python
|
devices/api/views.py
|
ggidofalvy-tc/peering-manager
|
0c358b108494f51936264f3ec5d600c20827a745
|
[
"Apache-2.0"
] | 173 |
2020-08-08T15:38:08.000Z
|
2022-03-21T11:35:25.000Z
|
devices/api/views.py
|
ggidofalvy-tc/peering-manager
|
0c358b108494f51936264f3ec5d600c20827a745
|
[
"Apache-2.0"
] | 192 |
2020-08-08T22:03:45.000Z
|
2022-03-31T04:20:44.000Z
|
devices/api/views.py
|
ggidofalvy-tc/peering-manager
|
0c358b108494f51936264f3ec5d600c20827a745
|
[
"Apache-2.0"
] | 33 |
2020-08-14T21:24:38.000Z
|
2022-03-06T14:55:13.000Z
|
from devices.filters import PlatformFilterSet
from devices.models import Platform
from utils.api import ModelViewSet
from .serializers import PlatformSerializer
class PlatformViewSet(ModelViewSet):
queryset = Platform.objects.all()
serializer_class = PlatformSerializer
filterset_class = PlatformFilterSet
| 26.75 | 45 | 0.825545 |
b00911ba6f15f4db9f06ab3f3a6b12d4bc67f2ce
| 97 |
py
|
Python
|
tinyq/constants.py
|
mozillazg/tinyq
|
fd9ecc593931c9b315c4aeb9150389b3e4ae670e
|
[
"MIT"
] | 14 |
2017-08-02T23:30:16.000Z
|
2021-05-31T19:58:29.000Z
|
tinyq/constants.py
|
mozillazg/tinyq
|
fd9ecc593931c9b315c4aeb9150389b3e4ae670e
|
[
"MIT"
] | null | null | null |
tinyq/constants.py
|
mozillazg/tinyq
|
fd9ecc593931c9b315c4aeb9150389b3e4ae670e
|
[
"MIT"
] | 2 |
2017-03-13T09:36:05.000Z
|
2017-10-27T14:33:48.000Z
|
# -*- coding: utf-8 -*-
DEFAULT_SCHEDULE_QUEUE_KEY = 'schedules'
DEFAULT_JOB_QUEUE_KEY = 'jobs'
| 19.4 | 40 | 0.721649 |
b637e64b98840de93a7fe0a67583d8e680179b5e
| 2,365 |
py
|
Python
|
f5/bigip/tm/gtm/test/unit/test_topology.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 272 |
2016-02-23T06:05:44.000Z
|
2022-02-20T02:09:32.000Z
|
f5/bigip/tm/gtm/test/unit/test_topology.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 1,103 |
2016-02-11T17:48:03.000Z
|
2022-02-15T17:13:37.000Z
|
f5/bigip/tm/gtm/test/unit/test_topology.py
|
nghia-tran/f5-common-python
|
acb23a6e5830a119b460c19a578654113419f5c3
|
[
"Apache-2.0"
] | 167 |
2016-02-11T17:48:21.000Z
|
2022-01-17T20:13:05.000Z
|
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from f5.bigip import ManagementRoot
from f5.bigip.tm import Gtm
from f5.bigip.tm.gtm.topology import Topology
from f5.sdk_exception import InvalidName
from f5.sdk_exception import MissingRequiredCreationParameter
from f5.sdk_exception import UnsupportedOperation
@pytest.fixture
def FakeTopology(fakeicontrolsession):
mr = ManagementRoot('192.168.1.1', 'admin', 'admin')
fake_gtm = Gtm(mr.tm)
fake_top = Topology(fake_gtm)
return fake_top
class TestCreate(object):
def test_create_two(self, fakeicontrolsession):
b = ManagementRoot('192.168.1.1', 'admin', 'admin')
r1 = b.tm.gtm.topology_s.topology
r2 = b.tm.gtm.topology_s.topology
assert r1 is not r2
def test_create_no_args(self, FakeTopology):
with pytest.raises(MissingRequiredCreationParameter):
FakeTopology.create()
def test_invalid_name_no_keywords(self, FakeTopology):
with pytest.raises(InvalidName):
FakeTopology.create(name='fake_stuff')
def test_invalid_name_no_ldns(self, FakeTopology):
with pytest.raises(InvalidName):
FakeTopology.create(name='server: fake_stuff')
def test_invalid_name_no_server(self, FakeTopology):
with pytest.raises(InvalidName):
FakeTopology.create(name='ldns: fake_stuff')
class Test_Refresh_Modify_Update(object):
def test_refresh_raises(self, FakeTopology):
with pytest.raises(UnsupportedOperation):
FakeTopology.refresh()
def test_modify_raises(self, FakeTopology):
with pytest.raises(UnsupportedOperation):
FakeTopology.modify()
def test_update_raises(self, FakeTopology):
with pytest.raises(UnsupportedOperation):
FakeTopology.update()
| 33.785714 | 74 | 0.728964 |
32d086de6b59c03cd1d45dc1f7ba72ec776da397
| 1,296 |
py
|
Python
|
djsniper/users/tests/test_admin.py
|
justdjango/django-nft-sniper
|
75306fa223493ec60cf621489c264b43110970c8
|
[
"MIT"
] | 18 |
2021-12-24T05:45:35.000Z
|
2022-03-30T05:18:53.000Z
|
djsniper/users/tests/test_admin.py
|
zevcc-gh/nft-sniper
|
7ba41adffe7b5157b98a162aa72272a89fb9bda2
|
[
"MIT"
] | 1 |
2022-03-05T05:58:27.000Z
|
2022-03-05T05:58:27.000Z
|
djsniper/users/tests/test_admin.py
|
zevcc-gh/nft-sniper
|
7ba41adffe7b5157b98a162aa72272a89fb9bda2
|
[
"MIT"
] | 3 |
2022-01-09T16:58:02.000Z
|
2022-02-23T11:53:46.000Z
|
import pytest
from django.urls import reverse
from djsniper.users.models import User
pytestmark = pytest.mark.django_db
class TestUserAdmin:
def test_changelist(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
response = admin_client.post(
url,
data={
"username": "test",
"password1": "My_R@ndom-P@ssw0rd",
"password2": "My_R@ndom-P@ssw0rd",
},
)
assert response.status_code == 302
assert User.objects.filter(username="test").exists()
def test_view_user(self, admin_client):
user = User.objects.get(username="admin")
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
| 31.609756 | 79 | 0.627315 |
92344056812b06fc0e60d6715c64a2ff100f3231
| 10,247 |
py
|
Python
|
src/conan_app_launcher/ui/views/conan_search/conan_search.py
|
goszpeti/app_grid_conan
|
bd76c8528e6c6b839abd7a4d26d7194428faed7d
|
[
"MIT"
] | null | null | null |
src/conan_app_launcher/ui/views/conan_search/conan_search.py
|
goszpeti/app_grid_conan
|
bd76c8528e6c6b839abd7a4d26d7194428faed7d
|
[
"MIT"
] | null | null | null |
src/conan_app_launcher/ui/views/conan_search/conan_search.py
|
goszpeti/app_grid_conan
|
bd76c8528e6c6b839abd7a4d26d7194428faed7d
|
[
"MIT"
] | null | null | null |
import pprint
from typing import List, Optional
import conan_app_launcher.app as app # using global module pattern
from conan_app_launcher.core import open_file
from conan_app_launcher.ui.common import AsyncLoader, get_themed_asset_image
from conan_app_launcher.ui.dialogs import ConanInstallDialog
from conan_app_launcher.ui.fluent_window import FluentWindow
from conan_app_launcher.ui.views import LocalConanPackageExplorer
from conan_app_launcher.ui.widgets import RoundedMenu
from conans.model.ref import ConanFileReference
from PyQt5.QtCore import QPoint, Qt, pyqtBoundSignal, pyqtSlot
from PyQt5.QtGui import QIcon, QKeySequence
from PyQt5.QtWidgets import (QAction, QApplication, QDialog, QListWidgetItem,
QWidget)
from .conan_search_ui import Ui_Form
from .model import PROFILE_TYPE, PkgSearchModel, SearchedPackageTreeItem
class ConanSearchDialog(QDialog):
def __init__(self, parent: Optional[QWidget], conan_pkg_installed: Optional[pyqtBoundSignal] = None,
conan_pkg_removed: Optional[pyqtBoundSignal] = None, page_widgets: Optional[FluentWindow.PageStore] = None):
# Add minimize and maximize buttons
super().__init__(parent, Qt.WindowSystemMenuHint | Qt.WindowMaximizeButtonHint | Qt.WindowCloseButtonHint)
self.page_widgets = page_widgets
self.conan_pkg_installed = conan_pkg_installed
self.conan_pkg_removed = conan_pkg_removed
self._ui = Ui_Form()
self._ui.setupUi(self)
# init search bar
icon = QIcon(str(app.asset_path / "icons/icon.ico"))
self.setWindowIcon(icon)
self._ui.search_button.clicked.connect(self.on_search)
self._ui.search_button.setEnabled(False)
self._ui.search_line.validator_enabled = False
self._ui.search_line.textChanged.connect(self._enable_search_button)
self._ui.search_button.setShortcut(QKeySequence(Qt.Key_Return))
# init remotes list
remotes = app.conan_api.get_remotes()
for remote in remotes:
item = QListWidgetItem(remote.name, self._ui.remote_list)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
item.checkState
# sets height to the height of the items, but max 120
items_height = self._ui.remote_list.sizeHintForRow(
0) * self._ui.remote_list.count() + 2 * self._ui.remote_list.frameWidth()
self._ui.remote_list.setFixedHeight(min(items_height, 120))
self._pkg_result_model = PkgSearchModel()
self._pkg_result_loader = AsyncLoader(self)
self._ui.search_results_tree_view.setContextMenuPolicy(Qt.CustomContextMenu)
self._ui.search_results_tree_view.customContextMenuRequested.connect(self.on_pkg_context_menu_requested)
self.apply_theme()
def apply_theme(self):
icon = QIcon(get_themed_asset_image("icons/search_packages.png"))
self._init_pkg_context_menu()
self._ui.search_icon.setPixmap(icon.pixmap(20, 20))
def load(self): # TODO define interface for views
pass
def _enable_search_button(self):
""" Enable search button from minimum 3 characters onwards"""
if len(self._ui.search_line.text()) > 2:
self._ui.search_button.setEnabled(True)
else:
self._ui.search_button.setEnabled(False)
def _init_pkg_context_menu(self):
""" Initalize context menu with all actions """
self.select_cntx_menu = RoundedMenu()
self.copy_ref_action = QAction("Copy reference", self)
self.copy_ref_action.setIcon(QIcon(get_themed_asset_image("icons/copy_link.png")))
self.select_cntx_menu.addAction(self.copy_ref_action)
self.copy_ref_action.triggered.connect(self.on_copy_ref_requested)
self.show_conanfile_action = QAction("Show conanfile", self)
self.show_conanfile_action.setIcon(QIcon(get_themed_asset_image("icons/file_preview.png")))
self.select_cntx_menu.addAction(self.show_conanfile_action)
self.show_conanfile_action.triggered.connect(self.on_show_conanfile_requested)
self.install_pkg_action = QAction("Install package", self)
self.install_pkg_action.setIcon(QIcon(get_themed_asset_image("icons/download_pkg.png")))
self.select_cntx_menu.addAction(self.install_pkg_action)
self.install_pkg_action.triggered.connect(self.on_install_pkg_requested)
self.show_in_pkg_exp_action = QAction("Show in Package Explorer", self)
self.show_in_pkg_exp_action.setIcon(QIcon(
get_themed_asset_image("icons/search_packages.png")))
self.select_cntx_menu.addAction(self.show_in_pkg_exp_action)
self.show_in_pkg_exp_action.triggered.connect(self.on_show_in_pkg_exp)
@pyqtSlot(QPoint)
def on_pkg_context_menu_requested(self, position: QPoint):
"""
        Executed when the context menu is requested.
This is done to dynamically grey out some options depending on the item type.
"""
item = self.get_selected_source_item(self._ui.search_results_tree_view)
if not item:
return
if item.empty:
return
if item.is_installed:
self.show_in_pkg_exp_action.setEnabled(True)
else:
self.show_in_pkg_exp_action.setEnabled(False)
self.select_cntx_menu.exec_(self._ui.search_results_tree_view.mapToGlobal(position))
@pyqtSlot()
def on_search(self):
""" Search for the user entered text by re-initing the model"""
# IMPORTANT! if put in async loading, the pyqt signal of the model will be created in another Qt thread
# and not be able to emit to the GUI thread.
if not self._ui.search_button.isEnabled():
return
self._pkg_result_model = PkgSearchModel(self.conan_pkg_installed, self.conan_pkg_removed)
self._pkg_result_loader.async_loading(
self, self._load_search_model, (), self._finish_load_search_model, "Searching for packages...")
# reset info text
self._ui.package_info_text.setText("")
@pyqtSlot()
def on_show_in_pkg_exp(self):
""" Switch to the main gui and select the item (ref or pkg) in the Local Package Explorer. """
item = self.get_selected_source_item(self._ui.search_results_tree_view)
if not item:
return
if not self.page_widgets:
return
self.page_widgets.get_page_by_type(LocalConanPackageExplorer).select_local_package_from_ref(
item.get_conan_ref(), refresh=True)
def _load_search_model(self):
""" Initialize tree view model by searching in conan """
self._pkg_result_model.setup_model_data(self._ui.search_line.text(), self.get_selected_remotes())
def _finish_load_search_model(self):
""" After conan search adjust the view """
self._ui.search_results_tree_view.setModel(self._pkg_result_model.proxy_model)
self._ui.search_results_tree_view.setColumnWidth(0, 320)
self._ui.search_results_tree_view.sortByColumn(1, Qt.AscendingOrder) # sort by remote at default
self._ui.search_results_tree_view.selectionModel().selectionChanged.connect(self.on_package_selected)
@pyqtSlot()
def on_package_selected(self):
""" Display package info only for pkg ref"""
item = self.get_selected_source_item(self._ui.search_results_tree_view)
if not item:
return
if item.type != PROFILE_TYPE:
return
pkg_info = pprint.pformat(item.pkg_data).translate(
{ord("{"): None, ord("}"): None, ord(","): None, ord("'"): None})
self._ui.package_info_text.setText(pkg_info)
@pyqtSlot()
def on_copy_ref_requested(self):
""" Copy the selected reference to the clipboard """
combined_ref = self.get_selected_combined_ref()
QApplication.clipboard().setText(combined_ref)
@pyqtSlot()
def on_show_conanfile_requested(self):
""" Show the conanfile by downloading and opening with the associated program """
combined_ref = self.get_selected_combined_ref()
conan_ref = combined_ref.split(":")[0]
conanfile = app.conan_api.get_conanfile_path(ConanFileReference.loads(conan_ref))
open_file(conanfile)
@pyqtSlot()
def on_install_pkg_requested(self):
""" Spawn the Conan install dialog """
combined_ref = self.get_selected_combined_ref()
dialog = ConanInstallDialog(self, combined_ref, self.conan_pkg_installed)
dialog.show()
def get_selected_remotes(self) -> List[str]:
""" Returns the user selected remotes """
selected_remotes = []
for i in range(self._ui.remote_list.count()):
item = self._ui.remote_list.item(i)
if item.checkState() == Qt.Checked:
selected_remotes.append(item.text())
return selected_remotes
def get_selected_combined_ref(self) -> str:
""" Returns the user selected ref in <ref>:<id> format """
        # no need to map from position, since right-click selects a single item
source_item = self.get_selected_source_item(self._ui.search_results_tree_view)
if not source_item:
return ""
conan_ref_item = source_item
id_str = ""
if source_item.type == PROFILE_TYPE:
conan_ref_item = source_item.parent()
id_str = ":" + source_item.pkg_data.get("id", "")
if not conan_ref_item:
return ""
return conan_ref_item.item_data[0] + id_str
def get_selected_source_item(self, view) -> Optional[SearchedPackageTreeItem]:
""" Gets the selected item from a view """
indexes = view.selectedIndexes()
if not indexes:
return None
view_index = view.selectedIndexes()[0]
source_item = view_index.model().mapToSource(view_index).internalPointer()
return source_item
| 46.577273 | 126 | 0.684005 |
327f9d2937c86cfdd5e4bc7284870257183ce69a
| 16,477 |
py
|
Python
|
flask_unchained/commands/new.py
|
briancappello/flask-unchained
|
bff296b5c808f5b1db10f7dddb81054600545749
|
[
"MIT"
] | 69 |
2018-10-10T01:59:11.000Z
|
2022-03-29T17:29:30.000Z
|
flask_unchained/commands/new.py
|
briancappello/flask-unchained
|
bff296b5c808f5b1db10f7dddb81054600545749
|
[
"MIT"
] | 18 |
2018-11-17T12:42:02.000Z
|
2021-05-22T18:45:27.000Z
|
flask_unchained/commands/new.py
|
briancappello/flask-unchained
|
bff296b5c808f5b1db10f7dddb81054600545749
|
[
"MIT"
] | 7 |
2018-10-12T16:20:25.000Z
|
2021-10-06T12:18:21.000Z
|
import os
import re
import shutil
import sys
from flask_unchained.cli import click
from flask_unchained.click import default, should_prompt
from flask_unchained.string_utils import right_replace
from jinja2 import Environment
from typing import *
JINJA_START_STR = '{#!'
JINJA_END_STR = '#}'
OTHER_START_STR = '#! '
OTHER_INLINE_START_STR = '#!('
OTHER_INLINE_END_STR = ')'
env = Environment() # skipcq: BAN-B701
_excluded = object()
MODULE_NAME_RE = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$')
IF_RE = re.compile(r'^if (?P<condition>.+): ?(?P<statement>.+)?$')
ELIF_RE = re.compile(r'^elif (?P<condition>.+): ?(?P<statement>.+)?$')
ELSE_RE = re.compile(r'^else: ?(?P<statement>.+)?$')
TEMPLATES_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, '_code_templates'))
PROJECT_TEMPLATE = os.path.join(TEMPLATES_ROOT, 'project')
def _validate_module_name(ctx, param, value): # skipcq: PYL-W0613 (unused arg)
if not MODULE_NAME_RE.match(value):
raise click.BadParameter('must be a valid python module name '
'(letters, numbers, and underscore characters only)')
return value
class Token:
def __init__(self, line_num=0, token=None):
self._line_num = line_num
self.tokens = []
if token is not None:
self.tokens.append(token)
@property
def line_num(self):
return self._line_num + 1
@line_num.setter
def line_num(self, line_num):
self._line_num = line_num - 1
def render(self, ctx=None, *, _debug=False):
if len(self.tokens) == 1:
token = self.tokens[0]
if isinstance(token, str):
return token if not _debug else f'{self.line_num}: {token}'
return token.render(ctx, _debug=_debug)
lines = []
for t in self.tokens:
result = t.render(ctx, _debug=_debug)
if result is not _excluded:
lines.append(result)
return '\n'.join(lines)
def __repr__(self):
return f'{self.__class__.__name__}(tokens=\n{self.tokens!r})'
class InlineToken(Token):
def __init__(self, line_num, parts):
super().__init__(line_num)
self.tokens = parts
def render(self, ctx=None, *, _debug=False):
if len(self.tokens) == 1:
if isinstance(self.tokens[0], (str, InlineToken)):
return env.from_string(self.tokens[0]).render(**ctx)
return super().render(ctx, _debug=_debug)
parts = []
for t in self.tokens:
if isinstance(t, str):
parts.append(t)
continue
result = t.render(ctx, _debug=_debug)
if result is not _excluded:
parts.append(result)
return ('' if not _debug else f'{self.line_num}: ') + ''.join(parts)
def __str__(self):
if len(self.tokens) > 1:
return ''.join(str(t) for t in self.tokens)
return self.tokens[0]
class IfToken(Token):
def __init__(self, line_num, condition, statement):
super().__init__(line_num)
self.condition = condition
self.statement = statement
self.next = None
def render(self, ctx=None, *, _debug=False):
condition = (self.condition if isinstance(self.condition, (str, bytes))
else repr(self.condition))
if not eval(condition, env.globals, ctx): # skipcq: PYL-W0123
if self.next:
return self.next.render(ctx, _debug=_debug)
return _excluded
if self.statement:
result = env.from_string(self.statement).render(**ctx)
return result if not _debug else f'{self.line_num}: {result}'
else:
return super().render(ctx, _debug=_debug)
def __repr__(self):
return f'IfToken(cond={self.condition!r}, token={self.tokens[0]!r}, next={self.next!r})'
@click.group()
def new():
"""
Generate new code for your Flask Unchained projects.
"""
@new.command()
@click.argument('dest', type=click.Path(resolve_path=True),
help='The project folder.')
@click.option('-a', '--app-bundle', default='app',
help='The module name to use for your app bundle.',
callback=_validate_module_name)
@click.option('--force/--no-force', default=False, show_default=True,
help='Whether or not to force creation if project folder is not empty.')
@click.option('--prompt/--no-prompt',
is_eager=True, is_flag=True, expose_value=False,
help='Whether or not to skip prompting and just use the defaults.',
default=False, show_default=True,
callback=should_prompt)
@click.option('--dev/--no-dev', prompt='Development Mode',
help='Whether or not to install development dependencies.',
default=lambda: default(True), show_default=True)
@click.option('--admin/--no-admin', prompt='Admin Bundle',
help='Whether or not to install the Admin Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--api/--no-api', prompt='API Bundle',
help='Whether or not to install the API Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--celery/--no-celery', prompt='Celery Bundle',
help='Whether or not to install the Celery Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--graphene/--no-graphene', prompt='Graphene Bundle',
help='Whether or not to install the Graphene Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--mail/--no-mail', prompt='Mail Bundle',
help='Whether or not to install the Mail Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--oauth/--no-oauth', prompt='OAuth Bundle',
help='Whether or not to install the OAuth Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--security/--no-security', prompt='Security Bundle',
help='Whether or not to install the Security Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--session/--no-session', prompt='Session Bundle',
help='Whether or not to install the Session Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--sqlalchemy/--no-sqlalchemy', prompt='SQLAlchemy Bundle',
help='Whether or not to install the SQLAlchemy Bundle.',
default=lambda: default(False), show_default=True)
@click.option('--webpack/--no-webpack', prompt='Webpack Bundle',
help='Whether or not to install the Webpack Bundle.',
default=lambda: default(False), show_default=True)
def project(dest, app_bundle, force, dev,
admin, api, celery, graphene, mail, oauth,
security, session, sqlalchemy, webpack):
"""
Create a new Flask Unchained project.
"""
if os.path.exists(dest) and os.listdir(dest) and not force:
if not click.confirm(f'WARNING: Project directory {dest!r} exists and is '
f'not empty. It will be DELETED!!! Continue?'):
click.echo(f'Exiting.')
sys.exit(1)
# build up a list of dependencies
# IMPORTANT: keys here must match setup.py's `extra_requires` keys
ctx = dict(dev=dev, admin=admin, api=api, celery=celery, graphene=graphene,
mail=mail, oauth=oauth, security=security or oauth, session=security or session,
sqlalchemy=security or sqlalchemy, webpack=webpack)
ctx['requirements'] = [k for k, v in ctx.items() if v]
# remaining ctx vars
ctx['app_bundle_module_name'] = app_bundle
# copy the project template into place
copy_file_tree(PROJECT_TEMPLATE, dest, ctx, [
(option, files)
for option, files
in [('api', ['app/serializers']),
('celery', ['app/tasks',
'celery_app.py']),
('graphene', ['app/graphql']),
('mail', ['templates/email']),
('security', ['app/models/role.py',
'app/models/user.py',
'db/fixtures/Role.yaml',
'db/fixtures/User.yaml']),
('sqlalchemy', ['app/models',
'app/managers',
'db']),
('webpack', ['assets',
'package.json',
'webpack']),
]
if not ctx[option]
])
click.echo(f'Successfully created a new project at: {dest}')
click.echo('To get started, run the following commands:\n')
click.echo(f'cd {dest}')
click.echo('pip install -r requirements-dev.txt')
if ctx["sqlalchemy"]:
click.echo("flask db init")
click.echo("flask db migrate -m 'create models'")
click.echo("flask db upgrade")
click.echo("flask run")
def copy_file_tree(src: str,
dest: str,
ctx: Optional[Dict[str, Any]] = None,
option_locations: Optional[List[Tuple[str, List[str]]]] = None):
"""
Copy the file tree under the :param:`src` directory to the :param:`dest`
directory. Pass :param:`ctx` to support rendering the files, and pass
:param:`option_locations` to support deleting optional files/folders.
"""
if os.path.exists(dest):
shutil.rmtree(dest, ignore_errors=True)
shutil.copytree(src, dest)
if option_locations:
for _, paths in option_locations:
for path in paths:
path = os.path.join(dest, path)
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path, ignore_errors=True)
if 'app_bundle_module_name' in ctx:
shutil.move(os.path.join(dest, 'app'),
os.path.join(dest, ctx['app_bundle_module_name']))
shutil.move(os.path.join(dest, 'tests', 'app'),
os.path.join(dest, 'tests', ctx['app_bundle_module_name']))
_render_file_tree(dest, ctx)
def _render_file_tree(root_dir: str, ctx: Optional[Dict[str, Any]] = None):
if not ctx:
return
for dirpath, _, filenames in os.walk(root_dir):
for filename in filenames:
path = os.path.join(dirpath, filename)
if ('__pycache__' in path
or path.endswith('.pyc')
or path.endswith('.pyo')):
# absolutely no idea how this happens but whenever Flask Unchained
# gets installed via pip, this cache crap happens
os.remove(path)
continue
root_token = Token()
try:
with open(path) as f:
lines = f.read().split('\n')
root_token, _ = _process_tokens(lines, root_token,
is_jinja=path.endswith('.html'))
except UnicodeDecodeError as e:
raise Exception(f'UnicodeDecodeError: {path} ({str(e)})')
with open(path, 'w') as f:
f.write(root_token.render(ctx))
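# Illustrative sketch of the '#! ' template syntax consumed by _process_tokens
# below (the file contents and ctx values here are hypothetical):
#
#   #! if security:
#   from flask_unchained.bundles.security import SecurityBundle
#   #! endif
#   APP_MODULE = '#!({{ app_bundle_module_name }})'
#
# Rendered with ctx={'security': True, 'app_bundle_module_name': 'app'}, the
# conditional block is kept and the inline token is substituted, producing:
#
#   from flask_unchained.bundles.security import SecurityBundle
#   APP_MODULE = 'app'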
def _process_tokens(lines: List[str],
token: Token,
*,
is_jinja: bool = False,
_depth: int = 0,
_real_start_i: int = 0):
start_str = JINJA_START_STR if is_jinja else OTHER_START_STR
end_str = JINJA_END_STR if is_jinja else None
i: int = 0
resume_from_real_i: int = 0
for i, line in enumerate(lines):
if (_real_start_i + i) < resume_from_real_i:
continue
stripped = line.strip()
if not stripped.startswith(start_str):
token.tokens.append(
_extract_inline_token(line, _real_start_i + i, is_jinja))
continue
stripped = stripped[len(start_str):].strip()
if end_str:
stripped = right_replace(stripped, end_str, '').strip()
if stripped == 'endif' and _depth > 0:
return token, _real_start_i + i
if_m = IF_RE.match(stripped)
elif_m = ELIF_RE.match(stripped)
else_m = ELSE_RE.match(stripped)
if not any([if_m, elif_m, else_m]) and stripped != 'endif':
token.tokens.append(InlineToken(_real_start_i + i, [
line[:line.find(start_str)] + stripped,
]))
continue
next_start_i = _real_start_i + i + 1
if if_m is not None:
condition = if_m.groupdict()['condition']
statement = if_m.groupdict()['statement']
if_token = IfToken(_real_start_i + i, condition,
line[:line.find(start_str):] + statement
if statement else None)
if not statement:
if_token, resume_from_real_i = _process_tokens(lines[i + 1:], if_token,
is_jinja=is_jinja,
_depth=_depth + 1,
_real_start_i=next_start_i)
token.tokens.append(if_token)
elif elif_m is not None:
condition = elif_m.groupdict()['condition']
statement = elif_m.groupdict()['statement']
if_token = IfToken(_real_start_i + i, condition,
line[:line.find(start_str):] + statement
if statement else None)
if not statement:
if_token, resume_from_real_i = _process_tokens(lines[i + 1:], if_token,
is_jinja=is_jinja,
_depth=_depth,
_real_start_i=next_start_i)
token.next = if_token
elif else_m is not None:
statement = else_m.groupdict()['statement']
if_token = IfToken(_real_start_i + i, True,
line[:line.find(start_str):] + statement
if statement else None)
if not statement:
if_token, resume_from_real_i = _process_tokens(lines[i + 1:], if_token,
is_jinja=is_jinja,
_depth=_depth,
_real_start_i=next_start_i)
token.next = if_token
continue
return token, _real_start_i + i
def _extract_inline_token(line: str,
line_num: int,
is_jinja: bool = False):
start_str = JINJA_START_STR if is_jinja else OTHER_INLINE_START_STR
end_str = JINJA_END_STR if is_jinja else OTHER_INLINE_END_STR
if start_str not in line:
return Token(line_num, line)
def _clean_end(part):
if part.startswith(end_str):
return part[len(end_str):]
return part
end_i = 0
parts = []
while True:
start_i = line.find(start_str, end_i)
if start_i == -1:
remaining = _clean_end(line[end_i:])
if remaining:
parts.append(remaining)
break
parts.append(_clean_end(line[end_i:start_i]))
if is_jinja:
end_i = line.find(end_str, start_i)
part = line[start_i+len(start_str):end_i]
else:
start_i, end_i = _find_inline_start_end_indexes(line, start_i)
part = line[start_i:end_i].strip()
parts.append(InlineToken(line_num, [part]))
return InlineToken(line_num, parts)
def _find_inline_start_end_indexes(line, start_idx=0):
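    """Brace-matching scan: return the index just past the opening delimiter of
    the inline expression at/after ``start_idx`` and the index of its matching
    closing delimiter, tracking nesting with a simple counter."""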
s = OTHER_INLINE_START_STR.strip()[-1]
e = OTHER_INLINE_END_STR
stack = 0
last_e = len(line)
for i, char in enumerate(line[start_idx:]):
if char == s:
stack += 1
elif char == e:
stack -= 1
if stack == 0:
last_e = start_idx + i
break
return line.find(s, start_idx) + len(s), last_e
| 38.769412 | 96 | 0.562906 |
4256068cff63730a9dd0cd8201382db87631f133
| 4,701 |
py
|
Python
|
src/cli.py
|
erick-dsnk/Electric
|
7e8aad1f792321d7839717ed97b641bee7a4a64e
|
[
"Apache-2.0"
] | null | null | null |
src/cli.py
|
erick-dsnk/Electric
|
7e8aad1f792321d7839717ed97b641bee7a4a64e
|
[
"Apache-2.0"
] | null | null | null |
src/cli.py
|
erick-dsnk/Electric
|
7e8aad1f792321d7839717ed97b641bee7a4a64e
|
[
"Apache-2.0"
] | null | null | null |
######################################################################
# SUPERCHARGECLI (EXTENSIONS) #
######################################################################
# -*- coding: utf-8 -*-
"""
Extension for the python ``click`` module to provide
a group with a git-like *did-you-mean* feature.
"""
import click
import difflib
__version__ = "0.0.3"
_click7 = click.__version__[0] >= '7'
class DYMMixin(object): # pylint: disable=too-few-public-methods
"""
Mixin class for click MultiCommand inherited classes
to provide git-like *did-you-mean* functionality when
a certain command is not registered.
"""
def __init__(self, *args, **kwargs):
self.max_suggestions = kwargs.pop("max_suggestions", 3)
self.cutoff = kwargs.pop("cutoff", 0.5)
super(DYMMixin, self).__init__(*args, **kwargs)
self._commands = {}
self._aliases = {}
def resolve_command(self, ctx, args):
"""
Overrides clicks ``resolve_command`` method
and appends *Did you mean ...* suggestions
to the raised exception message.
"""
original_cmd_name = click.utils.make_str(args[0])
try:
return super(DYMMixin, self).resolve_command(ctx, args)
except click.exceptions.UsageError as error:
error_msg = str(error)
matches = difflib.get_close_matches(original_cmd_name,
self.list_commands(ctx), self.max_suggestions, self.cutoff)
if matches:
error_msg += '\n\nDid you mean one of these?\n %s' % '\n '.join(matches) # pylint: disable=line-too-long
raise click.exceptions.UsageError(error_msg, error.ctx)
def command(self, *args, **kwargs):
aliases = kwargs.pop('aliases', [])
decorator = super(DYMMixin, self).command(*args, **kwargs)
if not aliases:
return decorator
def _decorator(f):
cmd = decorator(f)
if aliases:
self._commands[cmd.name] = aliases
for alias in aliases:
self._aliases[alias] = cmd.name
return cmd
return _decorator
def group(self, *args, **kwargs):
aliases = kwargs.pop('aliases', [])
decorator = super(DYMMixin, self).group(*args, **kwargs)
if not aliases:
return decorator
def _decorator(f):
cmd = decorator(f)
if aliases:
self._commands[cmd.name] = aliases
for alias in aliases:
self._aliases[alias] = cmd.name
return cmd
return _decorator
def resolve_alias(self, cmd_name):
if cmd_name in self._aliases:
return self._aliases[cmd_name]
return cmd_name
def get_command(self, ctx, cmd_name):
cmd_name = self.resolve_alias(cmd_name)
command = super(DYMMixin, self).get_command(ctx, cmd_name)
if command:
return command
def format_commands(self, ctx, formatter):
rows = []
sub_commands = self.list_commands(ctx)
        max_len = max((len(cmd) for cmd in sub_commands), default=0)
limit = formatter.width - 6 - max_len
for sub_command in sub_commands:
cmd = self.get_command(ctx, sub_command)
if cmd is None:
continue
if hasattr(cmd, 'hidden') and cmd.hidden:
continue
if sub_command in self._commands:
aliases = ','.join(sorted(self._commands[sub_command]))
sub_command = '{0} ({1})'.format(sub_command, aliases)
if _click7:
cmd_help = cmd.get_short_help_str(limit)
else:
cmd_help = cmd.short_help or ''
rows.append((sub_command, cmd_help))
if rows:
with formatter.section('Commands'):
formatter.write_dl(rows)
class SuperChargeCLI(DYMMixin, click.Group): # pylint: disable=too-many-public-methods
"""
click Group to provide git-like
*did-you-mean* functionality when a certain
command is not found in the group.
"""
# def format_help(self, ctx, formatter):
# # Custom Help Message =>
# click.echo(click.style('Commands :', fg='green'))
# click.echo('Next Line')
class DYMCommandCollection(DYMMixin, click.CommandCollection): # pylint: disable=too-many-public-methods
"""
click CommandCollection to provide git-like
*did-you-mean* functionality when a certain
command is not found in the group.
"""
| 33.340426 | 127 | 0.563923 |
4c68e7e7bf9cc99e90ebe001029982a4c9edd544
| 10,450 |
py
|
Python
|
adminapp/models.py
|
utkuerol/ifs
|
1bd25ecabceeca58776eb3ff82aaaa370e5e157e
|
[
"MIT"
] | 1 |
2021-09-27T10:56:25.000Z
|
2021-09-27T10:56:25.000Z
|
adminapp/models.py
|
utkuerol/ifs
|
1bd25ecabceeca58776eb3ff82aaaa370e5e157e
|
[
"MIT"
] | null | null | null |
adminapp/models.py
|
utkuerol/ifs
|
1bd25ecabceeca58776eb3ff82aaaa370e5e157e
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator
from django.db import models
from django.urls import reverse
from .validators import validate_files
import django.utils.timezone as timezone
class Dataset(models.Model):
"""
Model for Dataset object in database.
Fields:
-name: name (models.CharField)
-description: description (models.CharField)
-date: creation date (models.DateTimeField)
-type: type of dataset {MNIST or HIPE} (models.ChoiceField)
-feature_file: feature file (models.FileField)
    -normalized_feature_JSON: feature file with normalized values (models.CharField)
    -raw_file: raw file (models.FileField)
"""
TYPE_CHOICES = (
("HIPE", "HIPE"),
("MNIST", "MNIST")
)
name = models.CharField(max_length=10000, null=False, unique=True)
description = models.CharField(max_length=10000, null=False)
date = models.DateTimeField(default=timezone.now, blank=True, null=False)
type = models.CharField(max_length=10000, null=False, choices=TYPE_CHOICES)
feature_file = models.FileField(upload_to='documents/%Y/%m/%d', null=False)
normalized_feature_JSON = models.CharField(max_length=10000000, default=None, blank=True, null=True)
raw_file = models.FileField(upload_to='documents/%Y/%m/%d', null=False)
def get_absolute_url(self):
return reverse('dataset-detail', kwargs={'pk': self.pk})
def get_update_url(self):
return reverse('dataset-update', kwargs={'pk': self.pk})
def get_delete_url(self):
return reverse('dataset-delete', kwargs={'pk': self.pk})
def clean(self):
"""
        Override clean() to validate the uploaded feature and raw data files.
"""
feature_file = self.feature_file
raw_file = self.raw_file
type = self.type
if feature_file and raw_file and type:
validate_files(feature_file, raw_file, type)
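# A minimal usage sketch (assumed, not part of the original module); the name and
# file paths below are placeholders:
#
#   dataset = Dataset.objects.create(
#       name="hipe-sample",
#       description="Example HIPE dataset",
#       type="HIPE",
#       feature_file="documents/2020/01/01/features.csv",
#       raw_file="documents/2020/01/01/raw.csv",
#   )
#
# Model validation (e.g. ``dataset.full_clean()``) invokes ``clean()`` above, which
# checks both uploads via ``validate_files``.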
class Setup(models.Model):
"""
Model for Setup object in database.
Fields:
-dataset_id: id of the dataset for the experiment (models.ForeignKey)
-name: name (models.CharField)
-description: description (models.CharField)
    -classifier: selection of the classifier type {VanillaSVDD, SVDDneg, SSAD} (for further info: https://github.com/englhardt/OcalAPI.jl) {required input for OcalAPI} (models.ChoiceField)
    -query_strategy: selection of the query strategy
    {MinimumMarginQs, ExpectedMinimumMarginQs, MaximumLossQs, MaximumEntropyQs, HighConfidenceQs, DecisionBoundaryQs, NeighborhoodBasedQs, BoundaryNeighborCombination, RandomQs, RandomOutlierQs}
    {required input for OcalAPI} (models.ChoiceField)
    -gamma: gamma value for the classifier (models.FloatField)
    -cost_function: cost value for the classifier (models.FloatField)
    -feedback_mode: selection of feedback mode {User, System, Hybrid} (models.ChoiceField)
    -number_of_iterations: permitted number of iterations (models.IntegerField)
    -status: status {draft, final} (models.ChoiceField)
    -unknown_allowed: if "Yes" the user can choose "Unknown" as an answer (models.CharField with Yes/No choices)
    -raw_data_visible: if "Yes" the user can see raw data (models.CharField with Yes/No choices)
    -feature_data_visible: if "Yes" the user can see all columns from the feature data (models.CharField with Yes/No choices)
    -date: creation date (models.DateTimeField)
    -subspaces_gridpoints_JSON: JSON file with the calculated gridpoints for each subspace (models.CharField)
"""
YN_CHOICES = (
("Yes", "Yes"),
("No", "No")
)
CLASSIFIER_CHOICES = (
("VanillaSVDD", "Vanilla Support Vector Data Description"),
("SVDDneg", "SVDD with negative examples"),
("SSAD", "Semi-supervised Anomaly Detection")
)
QS_CHOICES = (
("MinimumMarginQs", "MinimumMarginQs"),
("ExpectedMinimumMarginQs", "ExpectedMinimumMarginQs"),
("MaximumLossQs", "MaximumLossQs"),
("MaximumEntropyQs", "MaximumEntropyQs"),
("HighConfidenceQs", "HighConfidenceQs"),
("DecisionBoundaryQs", "DecisionBoundaryQs"),
("NeighborhoodBasedQs", "NeighborhoodBasedQs"),
("BoundaryNeighborCombination", "BoundaryNeighborCombination"),
("RandomQs", "RandomQs"),
("RandomOutlierQs", "RandomOutlierQs")
)
FM_CHOICES = (
("user", "User"),
("system", "System"),
("hybrid", "Hybrid")
)
SETUP_STATUS_CHOICES = (
("draft", "draft"),
("final", "final")
)
dataset_id = models.ForeignKey(Dataset, on_delete=models.CASCADE, null=False)
name = models.CharField(max_length=10000, null=False, unique=True)
description = models.CharField(max_length=10000, null=False)
classifier = models.CharField(max_length=100, null=True, choices=CLASSIFIER_CHOICES)
gamma = models.FloatField(null=True, validators=[MinValueValidator(0.01)])
cost_function = models.FloatField(null=True, validators=[MinValueValidator(0.01)])
query_strategy = models.CharField(max_length=100, null=True, choices=QS_CHOICES)
feedback_mode = models.CharField(null=False, max_length=100, choices=FM_CHOICES)
number_of_iterations = models.IntegerField(null=True, validators=[MinValueValidator(1)])
status = models.CharField(null=False, max_length=100, default="draft", choices=SETUP_STATUS_CHOICES)
unknown_allowed = models.CharField(max_length=100, null=True, choices=YN_CHOICES)
raw_data_visible = models.CharField(max_length=100, null=True, choices=YN_CHOICES)
feature_data_visible = models.CharField(max_length=100, null=True, choices=YN_CHOICES)
date = models.DateTimeField(default=timezone.now, blank=True, null=False)
subspaces_gridpoints_JSON = models.CharField(max_length=100000000, default=None, blank=True, null=True)
def get_absolute_url(self):
return reverse('setup-detail', kwargs={'pk': self.pk})
def get_update_url(self):
return reverse('setup-update', kwargs={'pk': self.pk})
def get_delete_url(self):
return reverse('setup-delete', kwargs={'pk': self.pk})
class Session(models.Model):
"""
Model for Session object in database.
Fields:
-setup_id: id of the setup that session belongs to (models.ForeignKey)
-user_id: id of the user participating to the session (models.ForeignKey)
    -status: status {inactive, ongoing, finished, accepted, not_completed} (models.ChoiceField)
-date: creation date (models.DateTimeField)
"""
SESSION_STATUS_CHOICES = (
("inactive", "inactive"),
("ongoing", "ongoing"),
("finished", "finished"),
("accepted", "accepted"),
("not_completed", "not_completed")
)
setup_id = models.ForeignKey(Setup, on_delete=models.CASCADE, null=False)
user_id = models.ForeignKey(User, on_delete=models.CASCADE, default=None, blank=False)
status = models.CharField(null=False, max_length=100, choices=SESSION_STATUS_CHOICES)
date = models.DateTimeField(default=timezone.now, blank=True, null=False)
def get_absolute_url(self):
return reverse('session-detail', kwargs={'pk': self.pk})
class Iteration(models.Model):
"""
Model for Iteration object in database.
Fields:
-session_id: id of the session that iteration belongs to (models.ForeignKey)
    -duration: time spent by user on the iteration (models.IntegerField)
-iteration_order: order of the iteration (models.IntegerField)
-ocal_query_id: chosen object by OcalAPI for the feedback (models.IntegerField)
-ocal_prediction: predicted labeling of OcalAPI (models.CharField)
-user_feedback: feedback of the user for the object {Inlier, Outlier, Unknown} (models.ChoiceField)
    -ocal_output: calculation of OcalAPI after getting the user feedback (models.CharField)
"""
session_id = models.ForeignKey(Session, on_delete=models.CASCADE, null=False)
duration = models.IntegerField(default=0, blank=True, null=True)
iteration_order = models.IntegerField(null=False)
ocal_query_id = models.IntegerField(null=True)
ocal_prediction = models.CharField(max_length=100, null=True)
user_feedback = models.CharField(max_length=100, null=True)
ocal_output = models.CharField(max_length=100000, null=True)
class Meta:
index_together = [
("session_id", "iteration_order")
]
class Subspace(models.Model):
"""
Model for Subspace object in database.
Fields:
-setup_id: id of the setup that subspace belongs to (models.ForeignKey)
-feature_x_id: id of the feature to be used in x-Axis of the subspace (models.IntegerField)
-feature_y_id: id of the feature to be used in y-Axis of the subspace (models.IntegerField)
-gridpoints_x: amount of points in each row in the gridpoints layout (models.IntegerField)
-gridpoints_y: amount of points in each column in the gridpoints layout (models.IntegerField)
"""
setup_id = models.ForeignKey(Setup, on_delete=models.CASCADE, null=False)
feature_x_id = models.IntegerField(null=False)
feature_y_id = models.IntegerField(null=False)
gridpoints_x = models.IntegerField(null=True)
gridpoints_y = models.IntegerField(null=True)
class Meta:
index_together = [
("setup_id", "feature_x_id", "feature_y_id")
]
class SubspaceUserInteractionStats(models.Model):
"""
Model for SubspaceUserInteractionStats object in database.
Fields:
-subspace_id: id of subspace that the statistics belongs to (models.ForeignKey)
-iteration_id: id of iteration that subspace belongs to (models.ForeignKey)
    -duration: time spent by user on observing the subspace (models.IntegerField)
"""
subspace_id = models.ForeignKey(Subspace, on_delete=models.CASCADE, null=False)
iteration_id = models.ForeignKey(Iteration, on_delete=models.CASCADE, null=False)
duration = models.IntegerField(default=0, blank=True, null=True)
class Meta:
index_together = [
("subspace_id", "iteration_id")
]
| 46.035242 | 236 | 0.685455 |
1fc93a89f7883fce3d1a129472c7b324f1443e3a
| 77 |
py
|
Python
|
20170102_7.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
20170102_7.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
20170102_7.py
|
JaeGyu/PythonEx_1
|
e67053db6ca7431c3dd66351c190c53229e3f141
|
[
"MIT"
] | null | null | null |
def func(a):
return a()
def func2():
print("test")
func(func2)
| 6.416667 | 17 | 0.532468 |
495bd34d2028cf07547cf287f953b514ef0fa83a
| 331 |
py
|
Python
|
example/multi_process.py
|
nlp4faith/transformers-as-service
|
d47e8ab2a809623834d5aefb90c9339f6ea08541
|
[
"MIT"
] | 1 |
2020-12-15T10:37:40.000Z
|
2020-12-15T10:37:40.000Z
|
example/multi_process.py
|
nlp4faith/transformers-as-service
|
d47e8ab2a809623834d5aefb90c9339f6ea08541
|
[
"MIT"
] | null | null | null |
example/multi_process.py
|
nlp4faith/transformers-as-service
|
d47e8ab2a809623834d5aefb90c9339f6ea08541
|
[
"MIT"
] | null | null | null |
from multiprocessing import Pool
import time
def f(x):
return x*x
if __name__ == '__main__':
dicts = {'max_position_embeddings': 512}
print(dicts)
# with Pool(2) as pool:
# # print(p.map(f, [1, 2, 3]))
# time.sleep(1)
# ret = pool.apply(f, (10,))
# print(ret)
# print('hello')
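    # A working variant of the commented-out block above (minimal sketch):
    with Pool(2) as pool:
        print(pool.map(f, [1, 2, 3]))  # -> [1, 4, 9]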
| 20.6875 | 44 | 0.549849 |
a4842b2eae755bfa6dbca319f98197003a7e810e
| 5,254 |
py
|
Python
|
volume/bin/pythonpath/deprecated/classic.py
|
mottosso/docker
|
87c2870ed0fba482d3afe67034cd5baabae97dcc
|
[
"MIT"
] | 5 |
2018-05-24T23:15:02.000Z
|
2020-01-02T06:53:15.000Z
|
volume/bin/pythonpath/deprecated/classic.py
|
mottosso/docker
|
87c2870ed0fba482d3afe67034cd5baabae97dcc
|
[
"MIT"
] | 42 |
2018-05-25T15:57:08.000Z
|
2021-01-17T18:39:59.000Z
|
volume/bin/pythonpath/deprecated/classic.py
|
mottosso/docker
|
87c2870ed0fba482d3afe67034cd5baabae97dcc
|
[
"MIT"
] | 2 |
2018-05-25T14:42:04.000Z
|
2018-06-28T10:10:40.000Z
|
# -*- coding: utf-8 -*-
"""
Classic deprecation warning
===========================
Classic ``@deprecated`` decorator to deprecate old python classes, functions or methods.
"""
import functools
import inspect
import warnings
import wrapt
string_types = (type(b''), type(u''))
class ClassicAdapter(wrapt.AdapterFactory):
"""
Classic adapter -- *for advanced usage only*
This adapter is used to get the deprecation message according to the wrapped object type:
class, function, standard method, static method, or class method.
This is the base class of the :class:`deprecated.sphinx.SphinxAdapter` class
which is used to update the wrapped object docstring.
You can also inherit this class to change the deprecation message.
In the following example, we change the message into "The ... is deprecated.":
.. code-block:: python
import inspect
from deprecated.classic import ClassicAdapter
from deprecated.classic import deprecated
class MyClassicAdapter(ClassicAdapter):
def get_deprecated_msg(self, wrapped, instance):
if instance is None:
if inspect.isclass(wrapped):
fmt = "The class {name} is deprecated."
else:
fmt = "The function {name} is deprecated."
else:
if inspect.isclass(instance):
fmt = "The class method {name} is deprecated."
else:
fmt = "The method {name} is deprecated."
if self.reason:
fmt += " ({reason})"
if self.version:
fmt += " -- Deprecated since version {version}."
return fmt.format(name=wrapped.__name__,
reason=self.reason or "",
version=self.version or "")
Then, you can use your ``MyClassicAdapter`` class like this in your source code:
.. code-block:: python
@deprecated(reason="use another function", adapter_cls=MyClassicAdapter)
def some_old_function(x, y):
return x + y
"""
    def __init__(self, reason="", version=""):
        """
        Construct the adapter with an optional deprecation ``reason`` and the
        ``version`` in which the deprecation was introduced.
        """
self.reason = reason or ""
self.version = version or ""
super(ClassicAdapter, self).__init__()
def get_deprecated_msg(self, wrapped, instance):
if instance is None:
if inspect.isclass(wrapped):
fmt = "Call to deprecated class {name}."
else:
fmt = "Call to deprecated function (or staticmethod) {name}."
else:
if inspect.isclass(instance):
fmt = "Call to deprecated class method {name}."
else:
fmt = "Call to deprecated method {name}."
if self.reason:
fmt += " ({reason})"
if self.version:
fmt += " -- Deprecated since version {version}."
return fmt.format(name=wrapped.__name__,
reason=self.reason or "",
version=self.version or "")
def __call__(self, wrapped):
return wrapped
def deprecated(*args, **kwargs):
"""
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
**Classic usage:**
To use this, decorate your deprecated function with **@deprecated** decorator:
.. code-block:: python
from deprecated import deprecated
@deprecated
def some_old_function(x, y):
return x + y
You can also decorate a class or a method:
.. code-block:: python
from deprecated import deprecated
class SomeClass(object):
@deprecated
def some_old_method(self, x, y):
return x + y
@deprecated
class SomeOldClass(object):
pass
You can give a "reason" message to help the developer to choose another function/class,
and a "version" number to specify the starting version number of the deprecation.
.. code-block:: python
from deprecated import deprecated
@deprecated(reason="use another function", version='1.2.0')
def some_old_function(x, y):
return x + y
"""
if args and isinstance(args[0], string_types):
kwargs['reason'] = args[0]
args = args[1:]
if args and not callable(args[0]):
raise TypeError(repr(type(args[0])))
if args:
action = kwargs.pop('action', 'always')
category = kwargs.pop('category', DeprecationWarning)
adapter_cls = kwargs.pop('adapter_cls', ClassicAdapter)
adapter = adapter_cls(**kwargs)
@wrapt.decorator(adapter=adapter)
def wrapper(wrapped_, instance_, args_, kwargs_):
msg = adapter.get_deprecated_msg(wrapped_, instance_)
with warnings.catch_warnings():
warnings.simplefilter(action, category)
warnings.warn(msg, category=category, stacklevel=2)
return wrapped_(*args_, **kwargs_)
return wrapper(args[0])
return functools.partial(deprecated, **kwargs)
| 30.905882 | 93 | 0.589265 |
43e22a4ad79e0f6f39bea750d69f48e7aff40dbe
| 989 |
py
|
Python
|
examples/calculate_compound_interest.py
|
mengguo2/accy
|
d99d9bbcaa80265f90101ce6bd20ad5788556def
|
[
"MIT"
] | null | null | null |
examples/calculate_compound_interest.py
|
mengguo2/accy
|
d99d9bbcaa80265f90101ce6bd20ad5788556def
|
[
"MIT"
] | null | null | null |
examples/calculate_compound_interest.py
|
mengguo2/accy
|
d99d9bbcaa80265f90101ce6bd20ad5788556def
|
[
"MIT"
] | 1 |
2018-06-17T23:43:29.000Z
|
2018-06-17T23:43:29.000Z
|
from accy.accy_functions import cpd_int
"""
A. If a 15-year-old student deposits $500 to an online savings account,
which offers a constant monthly interest rate of 1.6%, what would
his savings be by the time he is 30 years old, assuming he does not
put in any extra money?
"""
initial_dep = 500
interest_rate = 0.016
nyears = 30 - 15
total_savings = cpd_int(initial_dep, interest_rate, nyears)
print('A. The student would have ${0:.2f}!'.format(total_savings))
"""
B. What would the total amount be if the interest rate was halved to 0.8%?
"""
"""
C. Again assuming the interest rate is 1.6%, how much money would have
accrued by the time he is 50 years old?
"""
"""
D. What if the student deposited the $500 when he was 20 instead of 15?
How much money would he have by the time he is 30 years old?
"""
"""
E. Again assuming the student starts depositing at 15 years old,
what is his total if he adds an extra 100$ every year until he is 30?
"""
| 30.90625 | 76 | 0.704752 |
80f389626a351c33407f350cd0c7784145e8539b
| 26,921 |
py
|
Python
|
wavefront_api_client/__init__.py
|
httpsgithu/python-client
|
f85a530367cdabe458a11919ad35609b9bc0606b
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/__init__.py
|
httpsgithu/python-client
|
f85a530367cdabe458a11919ad35609b9bc0606b
|
[
"Apache-2.0"
] | null | null | null |
wavefront_api_client/__init__.py
|
httpsgithu/python-client
|
f85a530367cdabe458a11919ad35609b9bc0606b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# flake8: noqa
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from wavefront_api_client.api.access_policy_api import AccessPolicyApi
from wavefront_api_client.api.account__user_and_service_account_api import AccountUserAndServiceAccountApi
from wavefront_api_client.api.alert_api import AlertApi
from wavefront_api_client.api.api_token_api import ApiTokenApi
from wavefront_api_client.api.cloud_integration_api import CloudIntegrationApi
from wavefront_api_client.api.dashboard_api import DashboardApi
from wavefront_api_client.api.derived_metric_api import DerivedMetricApi
from wavefront_api_client.api.direct_ingestion_api import DirectIngestionApi
from wavefront_api_client.api.event_api import EventApi
from wavefront_api_client.api.external_link_api import ExternalLinkApi
from wavefront_api_client.api.ingestion_spy_api import IngestionSpyApi
from wavefront_api_client.api.integration_api import IntegrationApi
from wavefront_api_client.api.maintenance_window_api import MaintenanceWindowApi
from wavefront_api_client.api.message_api import MessageApi
from wavefront_api_client.api.metric_api import MetricApi
from wavefront_api_client.api.metrics_policy_api import MetricsPolicyApi
from wavefront_api_client.api.monitored_application_api import MonitoredApplicationApi
from wavefront_api_client.api.monitored_service_api import MonitoredServiceApi
from wavefront_api_client.api.notificant_api import NotificantApi
from wavefront_api_client.api.proxy_api import ProxyApi
from wavefront_api_client.api.query_api import QueryApi
from wavefront_api_client.api.recent_app_map_search_api import RecentAppMapSearchApi
from wavefront_api_client.api.recent_traces_search_api import RecentTracesSearchApi
from wavefront_api_client.api.role_api import RoleApi
from wavefront_api_client.api.saved_app_map_search_api import SavedAppMapSearchApi
from wavefront_api_client.api.saved_app_map_search_group_api import SavedAppMapSearchGroupApi
from wavefront_api_client.api.saved_search_api import SavedSearchApi
from wavefront_api_client.api.saved_traces_search_api import SavedTracesSearchApi
from wavefront_api_client.api.saved_traces_search_group_api import SavedTracesSearchGroupApi
from wavefront_api_client.api.search_api import SearchApi
from wavefront_api_client.api.source_api import SourceApi
from wavefront_api_client.api.span_sampling_policy_api import SpanSamplingPolicyApi
from wavefront_api_client.api.usage_api import UsageApi
from wavefront_api_client.api.user_api import UserApi
from wavefront_api_client.api.user_group_api import UserGroupApi
from wavefront_api_client.api.webhook_api import WebhookApi
# import ApiClient
from wavefront_api_client.api_client import ApiClient
from wavefront_api_client.configuration import Configuration
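# Usage sketch (assumed, not part of the generated package). The host URL and the
# api_key field name below are assumptions based on the docstring above; check the
# generated Configuration class for the exact attribute names.
#
#   configuration = Configuration()
#   configuration.host = "https://YOUR_CLUSTER.wavefront.com"
#   configuration.api_key['X-AUTH-TOKEN'] = '<API token>'
#   api_client = ApiClient(configuration)
#   alert_api = AlertApi(api_client)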
# import models into sdk package
from wavefront_api_client.models.aws_base_credentials import AWSBaseCredentials
from wavefront_api_client.models.access_control_element import AccessControlElement
from wavefront_api_client.models.access_control_list_read_dto import AccessControlListReadDTO
from wavefront_api_client.models.access_control_list_simple import AccessControlListSimple
from wavefront_api_client.models.access_control_list_write_dto import AccessControlListWriteDTO
from wavefront_api_client.models.access_policy import AccessPolicy
from wavefront_api_client.models.access_policy_rule_dto import AccessPolicyRuleDTO
from wavefront_api_client.models.account import Account
from wavefront_api_client.models.alert import Alert
from wavefront_api_client.models.alert_dashboard import AlertDashboard
from wavefront_api_client.models.alert_min import AlertMin
from wavefront_api_client.models.alert_route import AlertRoute
from wavefront_api_client.models.alert_source import AlertSource
from wavefront_api_client.models.annotation import Annotation
from wavefront_api_client.models.anomaly import Anomaly
from wavefront_api_client.models.app_dynamics_configuration import AppDynamicsConfiguration
from wavefront_api_client.models.app_search_filter import AppSearchFilter
from wavefront_api_client.models.app_search_filter_value import AppSearchFilterValue
from wavefront_api_client.models.app_search_filters import AppSearchFilters
from wavefront_api_client.models.azure_activity_log_configuration import AzureActivityLogConfiguration
from wavefront_api_client.models.azure_base_credentials import AzureBaseCredentials
from wavefront_api_client.models.azure_configuration import AzureConfiguration
from wavefront_api_client.models.chart import Chart
from wavefront_api_client.models.chart_settings import ChartSettings
from wavefront_api_client.models.chart_source_query import ChartSourceQuery
from wavefront_api_client.models.class_loader import ClassLoader
from wavefront_api_client.models.cloud_integration import CloudIntegration
from wavefront_api_client.models.cloud_trail_configuration import CloudTrailConfiguration
from wavefront_api_client.models.cloud_watch_configuration import CloudWatchConfiguration
from wavefront_api_client.models.conversion import Conversion
from wavefront_api_client.models.conversion_object import ConversionObject
from wavefront_api_client.models.customer_facing_user_object import CustomerFacingUserObject
from wavefront_api_client.models.dashboard import Dashboard
from wavefront_api_client.models.dashboard_min import DashboardMin
from wavefront_api_client.models.dashboard_parameter_value import DashboardParameterValue
from wavefront_api_client.models.dashboard_section import DashboardSection
from wavefront_api_client.models.dashboard_section_row import DashboardSectionRow
from wavefront_api_client.models.derived_metric_definition import DerivedMetricDefinition
from wavefront_api_client.models.dynatrace_configuration import DynatraceConfiguration
from wavefront_api_client.models.ec2_configuration import EC2Configuration
from wavefront_api_client.models.event import Event
from wavefront_api_client.models.event_search_request import EventSearchRequest
from wavefront_api_client.models.event_time_range import EventTimeRange
from wavefront_api_client.models.external_link import ExternalLink
from wavefront_api_client.models.facet_response import FacetResponse
from wavefront_api_client.models.facet_search_request_container import FacetSearchRequestContainer
from wavefront_api_client.models.facets_response_container import FacetsResponseContainer
from wavefront_api_client.models.facets_search_request_container import FacetsSearchRequestContainer
from wavefront_api_client.models.fast_reader_builder import FastReaderBuilder
from wavefront_api_client.models.field import Field
from wavefront_api_client.models.gcp_billing_configuration import GCPBillingConfiguration
from wavefront_api_client.models.gcp_configuration import GCPConfiguration
from wavefront_api_client.models.history_entry import HistoryEntry
from wavefront_api_client.models.history_response import HistoryResponse
from wavefront_api_client.models.ingestion_policy import IngestionPolicy
from wavefront_api_client.models.ingestion_policy_mapping import IngestionPolicyMapping
from wavefront_api_client.models.install_alerts import InstallAlerts
from wavefront_api_client.models.integration import Integration
from wavefront_api_client.models.integration_alert import IntegrationAlert
from wavefront_api_client.models.integration_alias import IntegrationAlias
from wavefront_api_client.models.integration_dashboard import IntegrationDashboard
from wavefront_api_client.models.integration_manifest_group import IntegrationManifestGroup
from wavefront_api_client.models.integration_metrics import IntegrationMetrics
from wavefront_api_client.models.integration_status import IntegrationStatus
from wavefront_api_client.models.json_node import JsonNode
from wavefront_api_client.models.kubernetes_component import KubernetesComponent
from wavefront_api_client.models.kubernetes_component_status import KubernetesComponentStatus
from wavefront_api_client.models.logical_type import LogicalType
from wavefront_api_client.models.maintenance_window import MaintenanceWindow
from wavefront_api_client.models.message import Message
from wavefront_api_client.models.metric_details import MetricDetails
from wavefront_api_client.models.metric_details_response import MetricDetailsResponse
from wavefront_api_client.models.metric_status import MetricStatus
from wavefront_api_client.models.metrics_policy_read_model import MetricsPolicyReadModel
from wavefront_api_client.models.metrics_policy_write_model import MetricsPolicyWriteModel
from wavefront_api_client.models.module import Module
from wavefront_api_client.models.module_descriptor import ModuleDescriptor
from wavefront_api_client.models.module_layer import ModuleLayer
from wavefront_api_client.models.monitored_application_dto import MonitoredApplicationDTO
from wavefront_api_client.models.monitored_cluster import MonitoredCluster
from wavefront_api_client.models.monitored_service_dto import MonitoredServiceDTO
from wavefront_api_client.models.new_relic_configuration import NewRelicConfiguration
from wavefront_api_client.models.new_relic_metric_filters import NewRelicMetricFilters
from wavefront_api_client.models.notificant import Notificant
from wavefront_api_client.models.notification_messages import NotificationMessages
from wavefront_api_client.models.package import Package
from wavefront_api_client.models.paged import Paged
from wavefront_api_client.models.paged_account import PagedAccount
from wavefront_api_client.models.paged_alert import PagedAlert
from wavefront_api_client.models.paged_alert_with_stats import PagedAlertWithStats
from wavefront_api_client.models.paged_anomaly import PagedAnomaly
from wavefront_api_client.models.paged_cloud_integration import PagedCloudIntegration
from wavefront_api_client.models.paged_customer_facing_user_object import PagedCustomerFacingUserObject
from wavefront_api_client.models.paged_dashboard import PagedDashboard
from wavefront_api_client.models.paged_derived_metric_definition import PagedDerivedMetricDefinition
from wavefront_api_client.models.paged_derived_metric_definition_with_stats import PagedDerivedMetricDefinitionWithStats
from wavefront_api_client.models.paged_event import PagedEvent
from wavefront_api_client.models.paged_external_link import PagedExternalLink
from wavefront_api_client.models.paged_ingestion_policy import PagedIngestionPolicy
from wavefront_api_client.models.paged_integration import PagedIntegration
from wavefront_api_client.models.paged_maintenance_window import PagedMaintenanceWindow
from wavefront_api_client.models.paged_message import PagedMessage
from wavefront_api_client.models.paged_monitored_application_dto import PagedMonitoredApplicationDTO
from wavefront_api_client.models.paged_monitored_cluster import PagedMonitoredCluster
from wavefront_api_client.models.paged_monitored_service_dto import PagedMonitoredServiceDTO
from wavefront_api_client.models.paged_notificant import PagedNotificant
from wavefront_api_client.models.paged_proxy import PagedProxy
from wavefront_api_client.models.paged_recent_app_map_search import PagedRecentAppMapSearch
from wavefront_api_client.models.paged_recent_traces_search import PagedRecentTracesSearch
from wavefront_api_client.models.paged_related_event import PagedRelatedEvent
from wavefront_api_client.models.paged_report_event_anomaly_dto import PagedReportEventAnomalyDTO
from wavefront_api_client.models.paged_role_dto import PagedRoleDTO
from wavefront_api_client.models.paged_saved_app_map_search import PagedSavedAppMapSearch
from wavefront_api_client.models.paged_saved_app_map_search_group import PagedSavedAppMapSearchGroup
from wavefront_api_client.models.paged_saved_search import PagedSavedSearch
from wavefront_api_client.models.paged_saved_traces_search import PagedSavedTracesSearch
from wavefront_api_client.models.paged_saved_traces_search_group import PagedSavedTracesSearchGroup
from wavefront_api_client.models.paged_service_account import PagedServiceAccount
from wavefront_api_client.models.paged_source import PagedSource
from wavefront_api_client.models.paged_span_sampling_policy import PagedSpanSamplingPolicy
from wavefront_api_client.models.paged_user_group_model import PagedUserGroupModel
from wavefront_api_client.models.point import Point
from wavefront_api_client.models.policy_rule_read_model import PolicyRuleReadModel
from wavefront_api_client.models.policy_rule_write_model import PolicyRuleWriteModel
from wavefront_api_client.models.proxy import Proxy
from wavefront_api_client.models.query_event import QueryEvent
from wavefront_api_client.models.query_result import QueryResult
from wavefront_api_client.models.query_type_dto import QueryTypeDTO
from wavefront_api_client.models.raw_timeseries import RawTimeseries
from wavefront_api_client.models.recent_app_map_search import RecentAppMapSearch
from wavefront_api_client.models.recent_traces_search import RecentTracesSearch
from wavefront_api_client.models.related_anomaly import RelatedAnomaly
from wavefront_api_client.models.related_data import RelatedData
from wavefront_api_client.models.related_event import RelatedEvent
from wavefront_api_client.models.related_event_time_range import RelatedEventTimeRange
from wavefront_api_client.models.report_event_anomaly_dto import ReportEventAnomalyDTO
from wavefront_api_client.models.response_container import ResponseContainer
from wavefront_api_client.models.response_container_access_policy import ResponseContainerAccessPolicy
from wavefront_api_client.models.response_container_access_policy_action import ResponseContainerAccessPolicyAction
from wavefront_api_client.models.response_container_account import ResponseContainerAccount
from wavefront_api_client.models.response_container_alert import ResponseContainerAlert
from wavefront_api_client.models.response_container_cloud_integration import ResponseContainerCloudIntegration
from wavefront_api_client.models.response_container_dashboard import ResponseContainerDashboard
from wavefront_api_client.models.response_container_derived_metric_definition import ResponseContainerDerivedMetricDefinition
from wavefront_api_client.models.response_container_event import ResponseContainerEvent
from wavefront_api_client.models.response_container_external_link import ResponseContainerExternalLink
from wavefront_api_client.models.response_container_facet_response import ResponseContainerFacetResponse
from wavefront_api_client.models.response_container_facets_response_container import ResponseContainerFacetsResponseContainer
from wavefront_api_client.models.response_container_history_response import ResponseContainerHistoryResponse
from wavefront_api_client.models.response_container_ingestion_policy import ResponseContainerIngestionPolicy
from wavefront_api_client.models.response_container_integration import ResponseContainerIntegration
from wavefront_api_client.models.response_container_integration_status import ResponseContainerIntegrationStatus
from wavefront_api_client.models.response_container_list_access_control_list_read_dto import ResponseContainerListAccessControlListReadDTO
from wavefront_api_client.models.response_container_list_integration import ResponseContainerListIntegration
from wavefront_api_client.models.response_container_list_integration_manifest_group import ResponseContainerListIntegrationManifestGroup
from wavefront_api_client.models.response_container_list_notification_messages import ResponseContainerListNotificationMessages
from wavefront_api_client.models.response_container_list_service_account import ResponseContainerListServiceAccount
from wavefront_api_client.models.response_container_list_string import ResponseContainerListString
from wavefront_api_client.models.response_container_list_user_api_token import ResponseContainerListUserApiToken
from wavefront_api_client.models.response_container_maintenance_window import ResponseContainerMaintenanceWindow
from wavefront_api_client.models.response_container_map_string_integer import ResponseContainerMapStringInteger
from wavefront_api_client.models.response_container_map_string_integration_status import ResponseContainerMapStringIntegrationStatus
from wavefront_api_client.models.response_container_message import ResponseContainerMessage
from wavefront_api_client.models.response_container_metrics_policy_read_model import ResponseContainerMetricsPolicyReadModel
from wavefront_api_client.models.response_container_monitored_application_dto import ResponseContainerMonitoredApplicationDTO
from wavefront_api_client.models.response_container_monitored_cluster import ResponseContainerMonitoredCluster
from wavefront_api_client.models.response_container_monitored_service_dto import ResponseContainerMonitoredServiceDTO
from wavefront_api_client.models.response_container_notificant import ResponseContainerNotificant
from wavefront_api_client.models.response_container_paged_account import ResponseContainerPagedAccount
from wavefront_api_client.models.response_container_paged_alert import ResponseContainerPagedAlert
from wavefront_api_client.models.response_container_paged_alert_with_stats import ResponseContainerPagedAlertWithStats
from wavefront_api_client.models.response_container_paged_anomaly import ResponseContainerPagedAnomaly
from wavefront_api_client.models.response_container_paged_cloud_integration import ResponseContainerPagedCloudIntegration
from wavefront_api_client.models.response_container_paged_customer_facing_user_object import ResponseContainerPagedCustomerFacingUserObject
from wavefront_api_client.models.response_container_paged_dashboard import ResponseContainerPagedDashboard
from wavefront_api_client.models.response_container_paged_derived_metric_definition import ResponseContainerPagedDerivedMetricDefinition
from wavefront_api_client.models.response_container_paged_derived_metric_definition_with_stats import ResponseContainerPagedDerivedMetricDefinitionWithStats
from wavefront_api_client.models.response_container_paged_event import ResponseContainerPagedEvent
from wavefront_api_client.models.response_container_paged_external_link import ResponseContainerPagedExternalLink
from wavefront_api_client.models.response_container_paged_ingestion_policy import ResponseContainerPagedIngestionPolicy
from wavefront_api_client.models.response_container_paged_integration import ResponseContainerPagedIntegration
from wavefront_api_client.models.response_container_paged_maintenance_window import ResponseContainerPagedMaintenanceWindow
from wavefront_api_client.models.response_container_paged_message import ResponseContainerPagedMessage
from wavefront_api_client.models.response_container_paged_monitored_application_dto import ResponseContainerPagedMonitoredApplicationDTO
from wavefront_api_client.models.response_container_paged_monitored_cluster import ResponseContainerPagedMonitoredCluster
from wavefront_api_client.models.response_container_paged_monitored_service_dto import ResponseContainerPagedMonitoredServiceDTO
from wavefront_api_client.models.response_container_paged_notificant import ResponseContainerPagedNotificant
from wavefront_api_client.models.response_container_paged_proxy import ResponseContainerPagedProxy
from wavefront_api_client.models.response_container_paged_recent_app_map_search import ResponseContainerPagedRecentAppMapSearch
from wavefront_api_client.models.response_container_paged_recent_traces_search import ResponseContainerPagedRecentTracesSearch
from wavefront_api_client.models.response_container_paged_related_event import ResponseContainerPagedRelatedEvent
from wavefront_api_client.models.response_container_paged_report_event_anomaly_dto import ResponseContainerPagedReportEventAnomalyDTO
from wavefront_api_client.models.response_container_paged_role_dto import ResponseContainerPagedRoleDTO
from wavefront_api_client.models.response_container_paged_saved_app_map_search import ResponseContainerPagedSavedAppMapSearch
from wavefront_api_client.models.response_container_paged_saved_app_map_search_group import ResponseContainerPagedSavedAppMapSearchGroup
from wavefront_api_client.models.response_container_paged_saved_search import ResponseContainerPagedSavedSearch
from wavefront_api_client.models.response_container_paged_saved_traces_search import ResponseContainerPagedSavedTracesSearch
from wavefront_api_client.models.response_container_paged_saved_traces_search_group import ResponseContainerPagedSavedTracesSearchGroup
from wavefront_api_client.models.response_container_paged_service_account import ResponseContainerPagedServiceAccount
from wavefront_api_client.models.response_container_paged_source import ResponseContainerPagedSource
from wavefront_api_client.models.response_container_paged_span_sampling_policy import ResponseContainerPagedSpanSamplingPolicy
from wavefront_api_client.models.response_container_paged_user_group_model import ResponseContainerPagedUserGroupModel
from wavefront_api_client.models.response_container_proxy import ResponseContainerProxy
from wavefront_api_client.models.response_container_query_type_dto import ResponseContainerQueryTypeDTO
from wavefront_api_client.models.response_container_recent_app_map_search import ResponseContainerRecentAppMapSearch
from wavefront_api_client.models.response_container_recent_traces_search import ResponseContainerRecentTracesSearch
from wavefront_api_client.models.response_container_role_dto import ResponseContainerRoleDTO
from wavefront_api_client.models.response_container_saved_app_map_search import ResponseContainerSavedAppMapSearch
from wavefront_api_client.models.response_container_saved_app_map_search_group import ResponseContainerSavedAppMapSearchGroup
from wavefront_api_client.models.response_container_saved_search import ResponseContainerSavedSearch
from wavefront_api_client.models.response_container_saved_traces_search import ResponseContainerSavedTracesSearch
from wavefront_api_client.models.response_container_saved_traces_search_group import ResponseContainerSavedTracesSearchGroup
from wavefront_api_client.models.response_container_service_account import ResponseContainerServiceAccount
from wavefront_api_client.models.response_container_set_business_function import ResponseContainerSetBusinessFunction
from wavefront_api_client.models.response_container_set_source_label_pair import ResponseContainerSetSourceLabelPair
from wavefront_api_client.models.response_container_source import ResponseContainerSource
from wavefront_api_client.models.response_container_span_sampling_policy import ResponseContainerSpanSamplingPolicy
from wavefront_api_client.models.response_container_string import ResponseContainerString
from wavefront_api_client.models.response_container_tags_response import ResponseContainerTagsResponse
from wavefront_api_client.models.response_container_user_api_token import ResponseContainerUserApiToken
from wavefront_api_client.models.response_container_user_dto import ResponseContainerUserDTO
from wavefront_api_client.models.response_container_user_group_model import ResponseContainerUserGroupModel
from wavefront_api_client.models.response_container_validated_users_dto import ResponseContainerValidatedUsersDTO
from wavefront_api_client.models.response_container_void import ResponseContainerVoid
from wavefront_api_client.models.response_status import ResponseStatus
from wavefront_api_client.models.role_dto import RoleDTO
from wavefront_api_client.models.saved_app_map_search import SavedAppMapSearch
from wavefront_api_client.models.saved_app_map_search_group import SavedAppMapSearchGroup
from wavefront_api_client.models.saved_search import SavedSearch
from wavefront_api_client.models.saved_traces_search import SavedTracesSearch
from wavefront_api_client.models.saved_traces_search_group import SavedTracesSearchGroup
from wavefront_api_client.models.schema import Schema
from wavefront_api_client.models.search_query import SearchQuery
from wavefront_api_client.models.service_account import ServiceAccount
from wavefront_api_client.models.service_account_write import ServiceAccountWrite
from wavefront_api_client.models.snowflake_configuration import SnowflakeConfiguration
from wavefront_api_client.models.sortable_search_request import SortableSearchRequest
from wavefront_api_client.models.sorting import Sorting
from wavefront_api_client.models.source import Source
from wavefront_api_client.models.source_label_pair import SourceLabelPair
from wavefront_api_client.models.source_search_request_container import SourceSearchRequestContainer
from wavefront_api_client.models.span import Span
from wavefront_api_client.models.span_sampling_policy import SpanSamplingPolicy
from wavefront_api_client.models.specific_data import SpecificData
from wavefront_api_client.models.stats_model_internal_use import StatsModelInternalUse
from wavefront_api_client.models.stripe import Stripe
from wavefront_api_client.models.tags_response import TagsResponse
from wavefront_api_client.models.target_info import TargetInfo
from wavefront_api_client.models.tesla_configuration import TeslaConfiguration
from wavefront_api_client.models.timeseries import Timeseries
from wavefront_api_client.models.trace import Trace
from wavefront_api_client.models.triage_dashboard import TriageDashboard
from wavefront_api_client.models.tuple import Tuple
from wavefront_api_client.models.tuple_result import TupleResult
from wavefront_api_client.models.tuple_value_result import TupleValueResult
from wavefront_api_client.models.user_api_token import UserApiToken
from wavefront_api_client.models.user_dto import UserDTO
from wavefront_api_client.models.user_group import UserGroup
from wavefront_api_client.models.user_group_model import UserGroupModel
from wavefront_api_client.models.user_group_properties_dto import UserGroupPropertiesDTO
from wavefront_api_client.models.user_group_write import UserGroupWrite
from wavefront_api_client.models.user_model import UserModel
from wavefront_api_client.models.user_request_dto import UserRequestDTO
from wavefront_api_client.models.user_to_create import UserToCreate
from wavefront_api_client.models.validated_users_dto import ValidatedUsersDTO
from wavefront_api_client.models.void import Void
from wavefront_api_client.models.vrops_configuration import VropsConfiguration
from wavefront_api_client.models.wf_tags import WFTags
| 82.07622 | 409 | 0.92504 |
701ab26dfe1e09245bf8832de7cb95b228e4ecaf
| 6,274 |
py
|
Python
|
sktime/tests/_config.py
|
khrapovs/sktime
|
1589d007ef5dbcdc1f42f2c8278919ebed516358
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/tests/_config.py
|
khrapovs/sktime
|
1589d007ef5dbcdc1f42f2c8278919ebed516358
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/tests/_config.py
|
khrapovs/sktime
|
1589d007ef5dbcdc1f42f2c8278919ebed516358
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
__author__ = ["mloning"]
__all__ = ["ESTIMATOR_TEST_PARAMS", "EXCLUDE_ESTIMATORS", "EXCLUDED_TESTS"]
import numpy as np
from pyod.models.knn import KNN
from sklearn.preprocessing import FunctionTransformer, StandardScaler
from sktime.annotation.adapters import PyODAnnotator
from sktime.annotation.clasp import ClaSPSegmentation
from sktime.base import BaseEstimator
from sktime.forecasting.exp_smoothing import ExponentialSmoothing
from sktime.forecasting.structural import UnobservedComponents
from sktime.registry import (
BASE_CLASS_LIST,
BASE_CLASS_LOOKUP,
ESTIMATOR_TAG_LIST,
TRANSFORMER_MIXIN_LIST,
)
from sktime.regression.compose import ComposableTimeSeriesForestRegressor
from sktime.transformations.base import BaseTransformer
from sktime.transformations.panel.compose import (
ColumnTransformer,
SeriesToPrimitivesRowTransformer,
SeriesToSeriesRowTransformer,
)
from sktime.transformations.panel.random_intervals import RandomIntervals
from sktime.transformations.panel.shapelet_transform import RandomShapeletTransform
from sktime.transformations.panel.summarize import FittedParamExtractor
# The following estimators currently do not pass all unit tests
# https://github.com/alan-turing-institute/sktime/issues/1627
EXCLUDE_ESTIMATORS = [
# ConditionalDeseasonalizer and STLtransformer still need refactoring
# (see PR 1773, blocked through open discussion) escaping until then
"ConditionalDeseasonalizer",
"STLTransformer",
# SFA is non-compliant with any transformer interfaces, #2064
"SFA",
# requires y in fit, this is incompatible with the old testing framework
# unless it inherits from the old mixins, which hard coded the y
# should be removed once test_all_transformers has been refactored to scenarios
"TSFreshRelevantFeatureExtractor",
# PlateauFinder seems to be broken, see #2259
"PlateauFinder",
]
EXCLUDED_TESTS = {
# known issue when X is passed, wrong time indices are returned, #1364
"StackingForecaster": ["test_predict_time_index_with_X"],
# known side effects on multivariate arguments, #2072
"WindowSummarizer": ["test_methods_have_no_side_effects"],
# test fails in the Panel case for Differencer, see #2522
"Differencer": ["test_transform_inverse_transform_equivalent"],
# tagged in issue #2490
"SignatureClassifier": [
"test_classifier_on_unit_test_data",
"test_classifier_on_basic_motions",
],
# test fail with deep problem with pickling inside tensorflow.
"CNNClassifier": [
"test_fit_idempotent",
"test_persistence_via_pickle",
],
# pickling problem with local method see #2490
"ProximityStump": [
"test_persistence_via_pickle",
"test_fit_does_not_overwrite_hyper_params",
],
"ProximityTree": [
"test_persistence_via_pickle",
"test_fit_does_not_overwrite_hyper_params",
],
"ProximityForest": [
"test_persistence_via_pickle",
"test_fit_does_not_overwrite_hyper_params",
],
# sth is not quite right with the RowTransformer-s changing state,
# but these are anyway on their path to deprecation, see #2370
"SeriesToPrimitivesRowTransformer": ["test_methods_do_not_change_state"],
"SeriesToSeriesRowTransformer": ["test_methods_do_not_change_state"],
# ColumnTransformer still needs to be refactored, see #2537
"ColumnTransformer": ["test_methods_do_not_change_state"],
}
# We here configure estimators for basic unit testing, including setting of
# required hyper-parameters and setting of hyper-parameters for faster training.
SERIES_TO_SERIES_TRANSFORMER = StandardScaler()
SERIES_TO_PRIMITIVES_TRANSFORMER = FunctionTransformer(
np.mean, kw_args={"axis": 0}, check_inverse=False
)
TRANSFORMERS = [
(
"transformer1",
SeriesToSeriesRowTransformer(
SERIES_TO_SERIES_TRANSFORMER, check_transformer=False
),
),
(
"transformer2",
SeriesToSeriesRowTransformer(
SERIES_TO_SERIES_TRANSFORMER, check_transformer=False
),
),
]
ANOMALY_DETECTOR = KNN()
ESTIMATOR_TEST_PARAMS = {
FittedParamExtractor: {
"forecaster": ExponentialSmoothing(),
"param_names": ["initial_level"],
},
SeriesToPrimitivesRowTransformer: {
"transformer": SERIES_TO_PRIMITIVES_TRANSFORMER,
"check_transformer": False,
},
SeriesToSeriesRowTransformer: {
"transformer": SERIES_TO_SERIES_TRANSFORMER,
"check_transformer": False,
},
ColumnTransformer: {
"transformers": [(name, estimator, [0]) for name, estimator in TRANSFORMERS]
},
RandomShapeletTransform: {
"max_shapelets": 5,
"n_shapelet_samples": 50,
"batch_size": 20,
},
RandomIntervals: {
"n_intervals": 3,
},
ComposableTimeSeriesForestRegressor: {"n_estimators": 3},
UnobservedComponents: {"level": "local level"},
PyODAnnotator: {"estimator": ANOMALY_DETECTOR},
ClaSPSegmentation: {"period_length": 5, "n_cps": 1},
}
# We use estimator tags in addition to class hierarchies to further distinguish
# estimators into different categories. This is useful for defining and running
# common tests for estimators with the same tags.
VALID_ESTIMATOR_TAGS = tuple(ESTIMATOR_TAG_LIST)
# These methods should not change the state of the estimator, that is, they should
# not change fitted parameters or hyper-parameters. They are also the methods that
# "apply" the fitted estimator to data and useful for checking results.
NON_STATE_CHANGING_METHODS = (
"predict",
"predict_var",
"predict_proba",
"decision_function",
"transform",
# todo: add this back
# escaping this, since for some estimators
# the input format of inverse_transform assumes special col names
# "inverse_transform",
)
# The following gives a list of valid estimator base classes.
VALID_TRANSFORMER_TYPES = tuple(TRANSFORMER_MIXIN_LIST) + (BaseTransformer,)
VALID_ESTIMATOR_BASE_TYPES = tuple(BASE_CLASS_LIST)
VALID_ESTIMATOR_TYPES = (
BaseEstimator,
*VALID_ESTIMATOR_BASE_TYPES,
*VALID_TRANSFORMER_TYPES,
)
VALID_ESTIMATOR_BASE_TYPE_LOOKUP = BASE_CLASS_LOOKUP
| 36.690058 | 86 | 0.741792 |
a7e33c7b147a183b5c598f9eb3ee46a21886a9ea
| 889 |
py
|
Python
|
marketvalue.py
|
plooploops/rosterrun
|
3ee19392e358a6ee465dca36be7c4d903a64c36c
|
[
"Apache-2.0"
] | null | null | null |
marketvalue.py
|
plooploops/rosterrun
|
3ee19392e358a6ee465dca36be7c4d903a64c36c
|
[
"Apache-2.0"
] | null | null | null |
marketvalue.py
|
plooploops/rosterrun
|
3ee19392e358a6ee465dca36be7c4d903a64c36c
|
[
"Apache-2.0"
] | null | null | null |
from mathutility import median, min, max
def median_values(items = {}):
if len(items) == 0:
return None
results = {}
for k in items.keys():
results[k] = median(items[k])
return results
def min_values(items = {}):
if len(items) == 0:
return None
results = {}
for k in items.keys():
results[k] = min(items[k])
return results
def max_values(items = {}):
if len(items) == 0:
return None
results = {}
for k in items.keys():
results[k] = max(items[k])
return results
def merge_market_values(current_items = {}, recent_items = {}):
if len(recent_items) == 0:
return current_items
if len(current_items) == 0:
return recent_items
merged_items = dict({ k : v for (k, v) in recent_items.items() if v > 0 }.items() + { k : v for (k, v) in current_items.items() if v > 0 }.items())
return merged_items
| 20.204545 | 149 | 0.605174 |
d2a231d343bb69f42dc7042696ed0f00569273e7
| 21,101 |
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_express_route_circuit_authorizations_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8 |
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_express_route_circuit_authorizations_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_09_01/aio/operations/_express_route_circuit_authorizations_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCircuitAuthorizationsOperations:
"""ExpressRouteCircuitAuthorizationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def get(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
**kwargs
) -> "models.ExpressRouteCircuitAuthorization":
"""Gets the specified authorization from the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCircuitAuthorization, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_09_01.models.ExpressRouteCircuitAuthorization
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
authorization_parameters: "models.ExpressRouteCircuitAuthorization",
**kwargs
) -> "models.ExpressRouteCircuitAuthorization":
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitAuthorization"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'authorizationName': self._serialize.url("authorization_name", authorization_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(authorization_parameters, 'ExpressRouteCircuitAuthorization')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
circuit_name: str,
authorization_name: str,
authorization_parameters: "models.ExpressRouteCircuitAuthorization",
**kwargs
) -> AsyncLROPoller["models.ExpressRouteCircuitAuthorization"]:
"""Creates or updates an authorization in the specified express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the express route circuit.
:type circuit_name: str
:param authorization_name: The name of the authorization.
:type authorization_name: str
:param authorization_parameters: Parameters supplied to the create or update express route
circuit authorization operation.
:type authorization_parameters: ~azure.mgmt.network.v2019_09_01.models.ExpressRouteCircuitAuthorization
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitAuthorization or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_09_01.models.ExpressRouteCircuitAuthorization]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ExpressRouteCircuitAuthorization"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
circuit_name=circuit_name,
authorization_name=authorization_name,
authorization_parameters=authorization_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCircuitAuthorization', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations/{authorizationName}'} # type: ignore
def list(
self,
resource_group_name: str,
circuit_name: str,
**kwargs
) -> AsyncIterable["models.AuthorizationListResult"]:
"""Gets all authorizations in an express route circuit.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param circuit_name: The name of the circuit.
:type circuit_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AuthorizationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_09_01.models.AuthorizationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.AuthorizationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'circuitName': self._serialize.url("circuit_name", circuit_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('AuthorizationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCircuits/{circuitName}/authorizations'} # type: ignore
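# --- Illustrative usage sketch (added for this excerpt, not part of the generated SDK file) ---
# As the class docstring notes, these operations are normally reached through a management
# client rather than instantiated directly.  The hypothetical snippet below shows how the
# long-running delete and the paged list might be driven; the credential setup, resource
# names and client entry point are assumptions for illustration only.
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async def example(subscription_id: str) -> None:
#         async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
#             ops = client.express_route_circuit_authorizations
#             poller = await ops.begin_delete("my-rg", "my-circuit", "my-auth")
#             await poller.result()                      # wait for the LRO to finish
#             async for auth in ops.list("my-rg", "my-circuit"):
#                 print(auth.name)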
| 50.480861 | 232 | 0.678262 |
d197ae6c9d70e33cbdb00cc12f2fa324440ac59c
| 300 |
py
|
Python
|
pulsar/managers/util/__init__.py
|
jmchilton/pulsar
|
783b90cf0bce893a11c347fcaf6778b98e0bb062
|
[
"Apache-2.0"
] | 6 |
2018-11-03T22:43:35.000Z
|
2022-02-15T17:51:33.000Z
|
pulsar/managers/util/__init__.py
|
jmchilton/pulsar
|
783b90cf0bce893a11c347fcaf6778b98e0bb062
|
[
"Apache-2.0"
] | 3 |
2015-06-06T22:16:03.000Z
|
2015-11-12T00:22:45.000Z
|
pulsar/managers/util/__init__.py
|
jmchilton/pulsar
|
783b90cf0bce893a11c347fcaf6778b98e0bb062
|
[
"Apache-2.0"
] | 10 |
2017-04-10T21:40:22.000Z
|
2022-02-21T16:50:10.000Z
|
"""
This module and its submodules contains utilities for running external
processes and interfacing with job managers. This module should contain
functionality shared between Galaxy and the Pulsar.
"""
from galaxy.util.bunch import Bunch
from .kill import kill_pid
__all__ = ['kill_pid', 'Bunch']
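# Illustrative note (added for this excerpt): typical use of the re-exported helpers; the pid
# below is made up and `kill_pid` is assumed to accept a numeric process id.
#
#     from pulsar.managers.util import Bunch, kill_pid
#     job = Bunch(id="42", pid=12345)
#     kill_pid(job.pid)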
| 27.272727 | 71 | 0.793333 |
37281bb7461981ccfbb3d459fcf0c95240fd0a1c
| 9,868 |
py
|
Python
|
ambari-agent/src/test/python/resource_management/TestContentSources.py
|
samyzh/ambari
|
ff73620da41697ed2ca9ece676f71ec9ba28a7d5
|
[
"Apache-2.0"
] | 1,664 |
2015-01-03T09:35:21.000Z
|
2022-03-31T04:55:24.000Z
|
ambari-agent/src/test/python/resource_management/TestContentSources.py
|
samyzh/ambari
|
ff73620da41697ed2ca9ece676f71ec9ba28a7d5
|
[
"Apache-2.0"
] | 3,018 |
2015-02-19T20:16:10.000Z
|
2021-11-13T20:47:48.000Z
|
ambari-agent/src/test/python/resource_management/TestContentSources.py
|
samyzh/ambari
|
ff73620da41697ed2ca9ece676f71ec9ba28a7d5
|
[
"Apache-2.0"
] | 1,673 |
2015-01-06T14:14:42.000Z
|
2022-03-31T07:22:30.000Z
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
from mock.mock import patch, MagicMock
from only_for_platform import get_platform, not_for_platform, os_distro_value, PLATFORM_WINDOWS
import os
if get_platform() != PLATFORM_WINDOWS:
with patch.object(os, "geteuid", return_value=0):
from resource_management.core import sudo
reload(sudo)
from ambari_commons.os_check import OSCheck
from resource_management.core import Environment
from resource_management.core.system import System
from resource_management.core.source import StaticFile
from resource_management.core.source import DownloadSource
from resource_management.core.source import Template
from resource_management.core.source import InlineTemplate
from ambari_jinja2 import UndefinedError, TemplateNotFound
import urllib2
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
class TestContentSources(TestCase):
@patch.object(os.path, "isfile")
@patch.object(os.path, "join")
def test_static_file_absolute_path(self, join_mock, is_file_mock):
"""
Testing StaticFile source with absolute path
"""
sudo.read_file = lambda path: 'content'
is_file_mock.return_value = True
with Environment("/base") as env:
static_file = StaticFile("/absolute/path/file")
content = static_file.get_content()
self.assertEqual('content', content)
self.assertEqual(is_file_mock.call_count, 1)
self.assertEqual(join_mock.call_count, 0)
@patch.object(os.path, "isfile")
@patch.object(os.path, "join")
def test_static_file_relative_path(self, join_mock, is_file_mock):
"""
Testing StaticFile source with relative path
"""
sudo.read_file = lambda path: 'content'
is_file_mock.return_value = True
with Environment("/base") as env:
static_file = StaticFile("relative/path/file")
content = static_file.get_content()
self.assertEqual('content', content)
self.assertEqual(is_file_mock.call_count, 1)
self.assertEqual(join_mock.call_count, 1)
join_mock.assert_called_with('/base', 'files', 'relative/path/file')
@patch.object(urllib2, "build_opener")
@patch.object(urllib2, "Request")
@patch.object(os.path, "exists")
def test_download_source_get_content_nocache(self, exists_mock, request_mock, opener_mock):
"""
Testing DownloadSource.get_content without cache
"""
exists_mock.return_value = True
web_file_mock = MagicMock()
web_file_mock.read.return_value = 'web_content'
opener_mock.return_value.open = MagicMock(return_value=web_file_mock)
with Environment("/base", tmp_dir='/var/tmp/downloads') as env:
download_source = DownloadSource("http://download/source", redownload_files=True)
content = download_source.get_content()
self.assertEqual('web_content', content)
self.assertEqual(opener_mock.call_count, 1)
request_mock.assert_called_with('http://download/source')
self.assertEqual(web_file_mock.read.call_count, 1)
@patch("__builtin__.open")
@patch.object(urllib2, "Request")
@patch.object(urllib2, "build_opener")
@patch.object(os, "makedirs")
@patch.object(os.path, "exists")
@patch("resource_management.core.sudo.create_file")
def test_download_source_get_content_cache_new(self, create_mock, exists_mock, makedirs_mock, opener_mock, request_mock, open_mock):
"""
Testing DownloadSource.get_content with cache on non-cached resource
"""
exists_mock.side_effect = [True, False]
web_file_mock = MagicMock()
web_file_mock.read.return_value = 'web_content'
opener_mock.return_value.open = MagicMock(return_value=web_file_mock)
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = 'content'
open_mock.return_value = file_mock
with Environment("/base", tmp_dir='/var/tmp/downloads') as env:
download_source = DownloadSource("http://download/source", redownload_files=False)
content = download_source.get_content()
self.assertEqual('web_content', content)
self.assertEqual(opener_mock.call_count, 1)
request_mock.assert_called_with('http://download/source')
self.assertEqual(web_file_mock.read.call_count, 1)
@patch("__builtin__.open")
@patch.object(os.path, "exists")
def test_download_source_get_content_cache_existent(self, exists_mock, open_mock):
"""
Testing DownloadSource.get_content with cache on cached resource
"""
exists_mock.side_effect = [True, True]
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = 'cached_content'
open_mock.return_value = file_mock
with Environment("/base", tmp_dir='/var/tmp/downloads') as env:
download_source = DownloadSource("http://download/source", redownload_files=False)
content = download_source.get_content()
self.assertEqual('cached_content', content)
self.assertEqual(open_mock.call_count, 1)
self.assertEqual(file_mock.read.call_count, 1)
@patch("__builtin__.open")
@patch.object(os.path, "getmtime")
@patch.object(os.path, "exists")
def test_template_loader(self, exists_mock, getmtime_mock, open_mock):
"""
Testing template loader on existent file
"""
exists_mock.return_value = True
getmtime_mock.return_value = 10
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = 'template content'
open_mock.return_value = file_mock
with Environment("/base") as env:
template = Template("test.j2")
self.assertEqual(open_mock.call_count, 1)
open_mock.assert_called_with('/base/templates/test.j2', 'rb')
self.assertEqual(getmtime_mock.call_count, 1)
getmtime_mock.assert_called_with('/base/templates/test.j2')
@patch.object(os.path, "exists")
def test_template_loader_fail(self, exists_mock):
"""
Testing template loader on non-existent file
"""
exists_mock.return_value = False
try:
with Environment("/base") as env:
template = Template("test.j2")
self.fail("Template should fail with nonexistent template file")
except TemplateNotFound:
pass
@patch("__builtin__.open")
@patch.object(os.path, "getmtime")
@patch.object(os.path, "exists")
def test_template_loader_absolute_path(self, exists_mock, getmtime_mock, open_mock):
"""
Testing template loader with absolute file-path
"""
exists_mock.return_value = True
getmtime_mock.return_value = 10
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = 'template content'
open_mock.return_value = file_mock
with Environment("/base") as env:
template = Template("/absolute/path/test.j2")
self.assertEqual(open_mock.call_count, 1)
open_mock.assert_called_with('/absolute/path/test.j2', 'rb')
self.assertEqual(getmtime_mock.call_count, 1)
getmtime_mock.assert_called_with('/absolute/path/test.j2')
@patch("__builtin__.open")
@patch.object(os.path, "getmtime")
@patch.object(os.path, "exists")
def test_template_loader_arguments(self, exists_mock, getmtime_mock, open_mock):
"""
Testing template loader additional arguments in template and absolute file-path
"""
exists_mock.return_value = True
getmtime_mock.return_value = 10
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = '{{test_arg1}} template content'
open_mock.return_value = file_mock
with Environment("/base") as env:
template = Template("/absolute/path/test.j2", [], test_arg1 = "test")
content = template.get_content()
self.assertEqual(open_mock.call_count, 1)
self.assertEqual(u'test template content', content)
open_mock.assert_called_with('/absolute/path/test.j2', 'rb')
self.assertEqual(getmtime_mock.call_count, 1)
getmtime_mock.assert_called_with('/absolute/path/test.j2')
def test_inline_template(self):
"""
Testing InlineTemplate
"""
with Environment("/base") as env:
template = InlineTemplate("{{test_arg1}} template content", [], test_arg1 = "test")
content = template.get_content()
self.assertEqual(u'test template content', content)
def test_template_imports(self):
"""
Testing Template additional imports
"""
try:
with Environment("/base") as env:
template = InlineTemplate("{{test_arg1}} template content {{os.path.join(path[0],path[1])}}", [], test_arg1 = "test", path = ["/one","two"])
content = template.get_content()
self.fail("Template.get_content should fail when evaluating unknown import")
except UndefinedError:
pass
with Environment("/base") as env:
template = InlineTemplate("{{test_arg1}} template content {{os.path.join(path[0],path[1])}}", [os], test_arg1 = "test", path = ["/one","two"])
content = template.get_content()
self.assertEqual(u'test template content /one/two', content)
| 37.520913 | 148 | 0.733178 |
7ec73347d16bf59c0a32b51f0e6dd8e7a0d82751
| 23,097 |
py
|
Python
|
dev/run-tests.py
|
CS490-Knush/spark
|
1b6755756cf831b7d90e5516899ccd78bc338eb6
|
[
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 3 |
2018-11-17T07:51:24.000Z
|
2018-11-17T08:09:19.000Z
|
dev/run-tests.py
|
CS490-Knush/spark
|
1b6755756cf831b7d90e5516899ccd78bc338eb6
|
[
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4 |
2018-12-11T13:53:39.000Z
|
2021-09-07T14:48:26.000Z
|
dev/run-tests.py
|
CS490-Knush/spark
|
1b6755756cf831b7d90e5516899ccd78bc338eb6
|
[
"PSF-2.0",
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 |
2018-10-19T23:50:20.000Z
|
2018-10-19T23:50:20.000Z
|
#!/usr/bin/env python2
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import itertools
from optparse import OptionParser
import os
import random
import re
import sys
import subprocess
from collections import namedtuple
from sparktestsupport import SPARK_HOME, USER_HOME, ERROR_CODES
from sparktestsupport.shellutils import exit_from_command_with_retcode, run_cmd, rm_r, which
from sparktestsupport.toposort import toposort_flatten, toposort
import sparktestsupport.modules as modules
# -------------------------------------------------------------------------------------------------
# Functions for traversing module dependency graph
# -------------------------------------------------------------------------------------------------
def determine_modules_for_files(filenames):
"""
Given a list of filenames, return the set of modules that contain those files.
If a file is not associated with a more specific submodule, then this method will consider that
file to belong to the 'root' module.
>>> sorted(x.name for x in determine_modules_for_files(["python/pyspark/a.py", "sql/core/foo"]))
['pyspark-core', 'sql']
>>> [x.name for x in determine_modules_for_files(["file_not_matched_by_any_subproject"])]
['root']
"""
changed_modules = set()
for filename in filenames:
matched_at_least_one_module = False
for module in modules.all_modules:
if module.contains_file(filename):
changed_modules.add(module)
matched_at_least_one_module = True
if not matched_at_least_one_module:
changed_modules.add(modules.root)
return changed_modules
def identify_changed_files_from_git_commits(patch_sha, target_branch=None, target_ref=None):
"""
Given a git commit and target ref, use the set of files changed in the diff in order to
determine which modules' tests should be run.
>>> [x.name for x in determine_modules_for_files( \
identify_changed_files_from_git_commits("fc0a1475ef", target_ref="5da21f07"))]
['graphx']
>>> 'root' in [x.name for x in determine_modules_for_files( \
identify_changed_files_from_git_commits("50a0496a43", target_ref="6765ef9"))]
True
"""
if target_branch is None and target_ref is None:
raise AttributeError("must specify either target_branch or target_ref")
elif target_branch is not None and target_ref is not None:
raise AttributeError("must specify either target_branch or target_ref, not both")
if target_branch is not None:
diff_target = target_branch
run_cmd(['git', 'fetch', 'origin', str(target_branch+':'+target_branch)])
else:
diff_target = target_ref
raw_output = subprocess.check_output(['git', 'diff', '--name-only', patch_sha, diff_target],
universal_newlines=True)
# Remove any empty strings
return [f for f in raw_output.split('\n') if f]
def setup_test_environ(environ):
print("[info] Setup the following environment variables for tests: ")
for (k, v) in environ.items():
print("%s=%s" % (k, v))
os.environ[k] = v
def determine_modules_to_test(changed_modules):
"""
Given a set of modules that have changed, compute the transitive closure of those modules'
dependent modules in order to determine the set of modules that should be tested.
Returns a topologically-sorted list of modules (ties are broken by sorting on module names).
>>> [x.name for x in determine_modules_to_test([modules.root])]
['root']
>>> [x.name for x in determine_modules_to_test([modules.build])]
['root']
>>> [x.name for x in determine_modules_to_test([modules.graphx])]
['graphx', 'examples']
>>> x = [x.name for x in determine_modules_to_test([modules.sql])]
>>> x # doctest: +NORMALIZE_WHITESPACE
['sql', 'avro', 'hive', 'mllib', 'sql-kafka-0-10', 'examples', 'hive-thriftserver',
'pyspark-sql', 'repl', 'sparkr', 'pyspark-mllib', 'pyspark-ml']
"""
modules_to_test = set()
for module in changed_modules:
modules_to_test = modules_to_test.union(determine_modules_to_test(module.dependent_modules))
modules_to_test = modules_to_test.union(set(changed_modules))
# If we need to run all of the tests, then we should short-circuit and return 'root'
if modules.root in modules_to_test:
return [modules.root]
return toposort_flatten(
{m: set(m.dependencies).intersection(modules_to_test) for m in modules_to_test}, sort=True)
def determine_tags_to_exclude(changed_modules):
tags = []
for m in modules.all_modules:
if m not in changed_modules:
tags += m.test_tags
return tags
# -------------------------------------------------------------------------------------------------
# Functions for working with subprocesses and shell tools
# -------------------------------------------------------------------------------------------------
def determine_java_executable():
"""Will return the path of the java executable that will be used by Spark's
tests or `None`"""
# Any changes in the way that Spark's build detects java must be reflected
# here. Currently the build looks for $JAVA_HOME/bin/java then falls back to
# the `java` executable on the path
java_home = os.environ.get("JAVA_HOME")
# check if there is an executable at $JAVA_HOME/bin/java
java_exe = which(os.path.join(java_home, "bin", "java")) if java_home else None
# if the java_exe wasn't set, check for a `java` version on the $PATH
return java_exe if java_exe else which("java")
JavaVersion = namedtuple('JavaVersion', ['major', 'minor', 'patch'])
def determine_java_version(java_exe):
"""Given a valid java executable will return its version in named tuple format
    with accessors '.major', '.minor' and '.patch'"""
raw_output = subprocess.check_output([java_exe, "-version"],
stderr=subprocess.STDOUT,
universal_newlines=True)
raw_output_lines = raw_output.split('\n')
# find raw version string, eg 'java version "1.8.0_25"'
raw_version_str = next(x for x in raw_output_lines if " version " in x)
match = re.search(r'(\d+)\.(\d+)\.(\d+)', raw_version_str)
major = int(match.group(1))
minor = int(match.group(2))
patch = int(match.group(3))
return JavaVersion(major, minor, patch)
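def _example_java_version_parse():
    # Illustrative helper (added for this excerpt, not part of the original script): applies the
    # same version regex to a made-up `java -version` banner line.
    sample_line = 'openjdk version "1.8.0_252"'
    match = re.search(r'(\d+)\.(\d+)\.(\d+)', sample_line)
    return JavaVersion(*(int(g) for g in match.groups()))  # JavaVersion(major=1, minor=8, patch=0)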
# -------------------------------------------------------------------------------------------------
# Functions for running the other build and test scripts
# -------------------------------------------------------------------------------------------------
def set_title_and_block(title, err_block):
os.environ["CURRENT_BLOCK"] = str(ERROR_CODES[err_block])
line_str = '=' * 72
print('')
print(line_str)
print(title)
print(line_str)
def run_apache_rat_checks():
set_title_and_block("Running Apache RAT checks", "BLOCK_RAT")
run_cmd([os.path.join(SPARK_HOME, "dev", "check-license")])
def run_scala_style_checks():
set_title_and_block("Running Scala style checks", "BLOCK_SCALA_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-scala")])
def run_java_style_checks():
set_title_and_block("Running Java style checks", "BLOCK_JAVA_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "sbt-checkstyle")])
def run_python_style_checks():
set_title_and_block("Running Python style checks", "BLOCK_PYTHON_STYLE")
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-python")])
def run_sparkr_style_checks():
set_title_and_block("Running R style checks", "BLOCK_R_STYLE")
if which("R"):
# R style check should be executed after `install-dev.sh`.
# Since warnings about `no visible global function definition` appear
# without the installation. SEE ALSO: SPARK-9121.
run_cmd([os.path.join(SPARK_HOME, "dev", "lint-r")])
else:
print("Ignoring SparkR style check as R was not found in PATH")
def build_spark_documentation():
set_title_and_block("Building Spark Documentation", "BLOCK_DOCUMENTATION")
os.environ["PRODUCTION"] = "1 jekyll build"
os.chdir(os.path.join(SPARK_HOME, "docs"))
jekyll_bin = which("jekyll")
if not jekyll_bin:
print("[error] Cannot find a version of `jekyll` on the system; please",
" install one and retry to build documentation.")
sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
else:
run_cmd([jekyll_bin, "build"])
os.chdir(SPARK_HOME)
def get_zinc_port():
"""
Get a randomized port on which to start Zinc
"""
return random.randrange(3030, 4030)
def exec_maven(mvn_args=()):
"""Will call Maven in the current directory with the list of mvn_args passed
in and returns the subprocess for any further processing"""
zinc_port = get_zinc_port()
os.environ["ZINC_PORT"] = "%s" % zinc_port
zinc_flag = "-DzincPort=%s" % zinc_port
flags = [os.path.join(SPARK_HOME, "build", "mvn"), "--force", zinc_flag]
run_cmd(flags + mvn_args)
def exec_sbt(sbt_args=()):
"""Will call SBT in the current directory with the list of mvn_args passed
in and returns the subprocess for any further processing"""
sbt_cmd = [os.path.join(SPARK_HOME, "build", "sbt")] + sbt_args
sbt_output_filter = re.compile(b"^.*[info].*Resolving" + b"|" +
b"^.*[warn].*Merging" + b"|" +
b"^.*[info].*Including")
# NOTE: echo "q" is needed because sbt on encountering a build file
# with failure (either resolution or compilation) prompts the user for
# input either q, r, etc to quit or retry. This echo is there to make it
# not block.
echo_proc = subprocess.Popen(["echo", "\"q\n\""], stdout=subprocess.PIPE)
sbt_proc = subprocess.Popen(sbt_cmd,
stdin=echo_proc.stdout,
stdout=subprocess.PIPE)
echo_proc.wait()
for line in iter(sbt_proc.stdout.readline, b''):
if not sbt_output_filter.match(line):
print(line, end='')
retcode = sbt_proc.wait()
if retcode != 0:
exit_from_command_with_retcode(sbt_cmd, retcode)
def get_hadoop_profiles(hadoop_version):
"""
For the given Hadoop version tag, return a list of Maven/SBT profile flags for
building and testing against that Hadoop version.
"""
sbt_maven_hadoop_profiles = {
"hadoop2.7": ["-Phadoop-2.7"],
}
if hadoop_version in sbt_maven_hadoop_profiles:
return sbt_maven_hadoop_profiles[hadoop_version]
else:
print("[error] Could not find", hadoop_version, "in the list. Valid options",
" are", sbt_maven_hadoop_profiles.keys())
sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
def build_spark_maven(hadoop_version):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
mvn_goals = ["clean", "package", "-DskipTests"]
profiles_and_goals = build_profiles + mvn_goals
print("[info] Building Spark (w/Hive 1.2.1) using Maven with these arguments: ",
" ".join(profiles_and_goals))
exec_maven(profiles_and_goals)
def build_spark_sbt(hadoop_version):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["test:package", # Build test jars as some tests depend on them
"streaming-kinesis-asl-assembly/assembly"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def build_spark_unidoc_sbt(hadoop_version):
set_title_and_block("Building Unidoc API Documentation", "BLOCK_DOCUMENTATION")
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["unidoc"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark unidoc (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def build_spark_assembly_sbt(hadoop_version, checkstyle=False):
# Enable all of the profiles for the build:
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
sbt_goals = ["assembly/package"]
profiles_and_goals = build_profiles + sbt_goals
print("[info] Building Spark assembly (w/Hive 1.2.1) using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
if checkstyle:
run_java_style_checks()
build_spark_unidoc_sbt(hadoop_version)
def build_apache_spark(build_tool, hadoop_version):
"""Will build Spark against Hive v1.2.1 given the passed in build tool (either `sbt` or
`maven`). Defaults to using `sbt`."""
set_title_and_block("Building Spark", "BLOCK_BUILD")
rm_r("lib_managed")
if build_tool == "maven":
build_spark_maven(hadoop_version)
else:
build_spark_sbt(hadoop_version)
def detect_binary_inop_with_mima(hadoop_version):
build_profiles = get_hadoop_profiles(hadoop_version) + modules.root.build_profile_flags
set_title_and_block("Detecting binary incompatibilities with MiMa", "BLOCK_MIMA")
run_cmd([os.path.join(SPARK_HOME, "dev", "mima")] + build_profiles)
def run_scala_tests_maven(test_profiles):
mvn_test_goals = ["test", "--fail-at-end"]
profiles_and_goals = test_profiles + mvn_test_goals
print("[info] Running Spark tests using Maven with these arguments: ",
" ".join(profiles_and_goals))
exec_maven(profiles_and_goals)
def run_scala_tests_sbt(test_modules, test_profiles):
sbt_test_goals = list(itertools.chain.from_iterable(m.sbt_test_goals for m in test_modules))
if not sbt_test_goals:
return
profiles_and_goals = test_profiles + sbt_test_goals
print("[info] Running Spark tests using SBT with these arguments: ",
" ".join(profiles_and_goals))
exec_sbt(profiles_and_goals)
def run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags):
"""Function to properly execute all tests passed in as a set from the
`determine_test_suites` function"""
set_title_and_block("Running Spark unit tests", "BLOCK_SPARK_UNIT_TESTS")
test_modules = set(test_modules)
test_profiles = get_hadoop_profiles(hadoop_version) + \
list(set(itertools.chain.from_iterable(m.build_profile_flags for m in test_modules)))
if excluded_tags:
test_profiles += ['-Dtest.exclude.tags=' + ",".join(excluded_tags)]
if build_tool == "maven":
run_scala_tests_maven(test_profiles)
else:
run_scala_tests_sbt(test_modules, test_profiles)
def run_python_tests(test_modules, parallelism):
set_title_and_block("Running PySpark tests", "BLOCK_PYSPARK_UNIT_TESTS")
command = [os.path.join(SPARK_HOME, "python", "run-tests")]
if test_modules != [modules.root]:
command.append("--modules=%s" % ','.join(m.name for m in test_modules))
command.append("--parallelism=%i" % parallelism)
run_cmd(command)
def run_python_packaging_tests():
set_title_and_block("Running PySpark packaging tests", "BLOCK_PYSPARK_PIP_TESTS")
command = [os.path.join(SPARK_HOME, "dev", "run-pip-tests")]
run_cmd(command)
def run_build_tests():
set_title_and_block("Running build tests", "BLOCK_BUILD_TESTS")
run_cmd([os.path.join(SPARK_HOME, "dev", "test-dependencies.sh")])
def run_sparkr_tests():
set_title_and_block("Running SparkR tests", "BLOCK_SPARKR_UNIT_TESTS")
if which("R"):
run_cmd([os.path.join(SPARK_HOME, "R", "run-tests.sh")])
else:
print("Ignoring SparkR tests as R was not found in PATH")
def parse_opts():
parser = OptionParser(
prog="run-tests"
)
parser.add_option(
"-p", "--parallelism", type="int", default=4,
help="The number of suites to test in parallel (default %default)"
)
(opts, args) = parser.parse_args()
if args:
parser.error("Unsupported arguments: %s" % ' '.join(args))
if opts.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return opts
def main():
opts = parse_opts()
# Ensure the user home directory (HOME) is valid and is an absolute directory
if not USER_HOME or not os.path.isabs(USER_HOME):
print("[error] Cannot determine your home directory as an absolute path;",
" ensure the $HOME environment variable is set properly.")
sys.exit(1)
os.chdir(SPARK_HOME)
rm_r(os.path.join(SPARK_HOME, "work"))
rm_r(os.path.join(USER_HOME, ".ivy2", "local", "org.apache.spark"))
rm_r(os.path.join(USER_HOME, ".ivy2", "cache", "org.apache.spark"))
os.environ["CURRENT_BLOCK"] = str(ERROR_CODES["BLOCK_GENERAL"])
java_exe = determine_java_executable()
if not java_exe:
print("[error] Cannot find a version of `java` on the system; please",
" install one and retry.")
sys.exit(2)
java_version = determine_java_version(java_exe)
# install SparkR
if which("R"):
run_cmd([os.path.join(SPARK_HOME, "R", "install-dev.sh")])
else:
print("Cannot install SparkR as R was not found in PATH")
if os.environ.get("AMPLAB_JENKINS"):
# if we're on the Amplab Jenkins build servers setup variables
# to reflect the environment settings
build_tool = os.environ.get("AMPLAB_JENKINS_BUILD_TOOL", "sbt")
hadoop_version = os.environ.get("AMPLAB_JENKINS_BUILD_PROFILE", "hadoop2.7")
test_env = "amplab_jenkins"
# add path for Python3 in Jenkins if we're calling from a Jenkins machine
os.environ["PATH"] = "/home/anaconda/envs/py3k/bin:" + os.environ.get("PATH")
else:
# else we're running locally and can use local settings
build_tool = "sbt"
hadoop_version = os.environ.get("HADOOP_PROFILE", "hadoop2.7")
test_env = "local"
print("[info] Using build tool", build_tool, "with Hadoop profile", hadoop_version,
"under environment", test_env)
changed_modules = None
changed_files = None
if test_env == "amplab_jenkins" and os.environ.get("AMP_JENKINS_PRB"):
target_branch = os.environ["ghprbTargetBranch"]
changed_files = identify_changed_files_from_git_commits("HEAD", target_branch=target_branch)
changed_modules = determine_modules_for_files(changed_files)
excluded_tags = determine_tags_to_exclude(changed_modules)
if not changed_modules:
changed_modules = [modules.root]
excluded_tags = []
print("[info] Found the following changed modules:",
", ".join(x.name for x in changed_modules))
# setup environment variables
    # note - the 'root' module doesn't collect environment variables for all modules, because
    # environment variables should not be set for modules that have not changed, even when
    # running the 'root' module. So here we use changed_modules rather than test_modules.
test_environ = {}
for m in changed_modules:
test_environ.update(m.environ)
setup_test_environ(test_environ)
test_modules = determine_modules_to_test(changed_modules)
# license checks
run_apache_rat_checks()
# style checks
if not changed_files or any(f.endswith(".scala")
or f.endswith("scalastyle-config.xml")
for f in changed_files):
run_scala_style_checks()
should_run_java_style_checks = False
if not changed_files or any(f.endswith(".java")
or f.endswith("checkstyle.xml")
or f.endswith("checkstyle-suppressions.xml")
for f in changed_files):
# Run SBT Checkstyle after the build to prevent a side-effect to the build.
should_run_java_style_checks = True
if not changed_files or any(f.endswith("lint-python")
or f.endswith("tox.ini")
or f.endswith(".py")
for f in changed_files):
run_python_style_checks()
if not changed_files or any(f.endswith(".R")
or f.endswith("lint-r")
or f.endswith(".lintr")
for f in changed_files):
run_sparkr_style_checks()
# determine if docs were changed and if we're inside the amplab environment
# note - the below commented out until *all* Jenkins workers can get `jekyll` installed
# if "DOCS" in changed_modules and test_env == "amplab_jenkins":
# build_spark_documentation()
if any(m.should_run_build_tests for m in test_modules):
run_build_tests()
# spark build
build_apache_spark(build_tool, hadoop_version)
# backwards compatibility checks
if build_tool == "sbt":
# Note: compatibility tests only supported in sbt for now
detect_binary_inop_with_mima(hadoop_version)
# Since we did not build assembly/package before running dev/mima, we need to
# do it here because the tests still rely on it; see SPARK-13294 for details.
build_spark_assembly_sbt(hadoop_version, should_run_java_style_checks)
# run the test suites
run_scala_tests(build_tool, hadoop_version, test_modules, excluded_tags)
modules_with_python_tests = [m for m in test_modules if m.python_test_goals]
if modules_with_python_tests:
run_python_tests(modules_with_python_tests, opts.parallelism)
run_python_packaging_tests()
if any(m.should_run_r_tests for m in test_modules):
run_sparkr_tests()
def _test():
import doctest
failure_count = doctest.testmod()[0]
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
main()
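# Example invocations (added for this excerpt; the concrete values are illustrative, but the
# flag and environment variable names are the ones handled above):
#
#   ./dev/run-tests.py --parallelism 8
#   AMPLAB_JENKINS=1 AMPLAB_JENKINS_BUILD_TOOL=maven ./dev/run-tests.py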
| 37.740196 | 100 | 0.665714 |
588f8103e5e4f32c7219b611157b3898aae5a6a4
| 2,890 |
py
|
Python
|
examples/baremetal/nlp/sst2/distilbert_base_uncased/run_engine.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 172 |
2021-09-14T18:34:17.000Z
|
2022-03-30T06:49:53.000Z
|
examples/baremetal/nlp/sst2/distilbert_base_uncased/run_engine.py
|
intel/lp-opt-tool
|
130eefa3586b38df6c0ff78cc8807ae273f6a63f
|
[
"Apache-2.0"
] | 40 |
2021-09-14T02:26:12.000Z
|
2022-03-29T08:34:04.000Z
|
examples/baremetal/nlp/sst2/distilbert_base_uncased/run_engine.py
|
intel/neural-compressor
|
16a4a12045fcb468da4d33769aff2c1a5e2ba6ba
|
[
"Apache-2.0"
] | 33 |
2021-09-15T07:27:25.000Z
|
2022-03-25T08:30:57.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import threading
import subprocess
import time
import os
import sys
import argparse
import array
import logging
import numpy as np
from utils import SST2DataSet
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("DISTILBERT")
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=8, type=int, help="Batch size")
parser.add_argument("--input_model", default="distilbert_base_uncased_sst2.onnx", type=str, help="input_model_path")
parser.add_argument("--output_model", default="./ir/", type=str, help="output_model_path")
parser.add_argument("--data_dir", default="./data", type=str, help="The input data dir.")
parser.add_argument("--tokenizer_dir", default= \
"distilbert-base-uncased-finetuned-sst-2-english", type=str,
help="pre-trained model tokenizer name or path")
parser.add_argument("--config", default="./bert_static.yaml", type=str, help="yaml path")
parser.add_argument('--benchmark', action='store_true', default=False)
parser.add_argument('--tune', action='store_true', default=False, help="whether quantize the model")
parser.add_argument('--mode', type=str, help="benchmark mode of performance or accuracy")
args = parser.parse_args()
return args
def main():
args = get_args()
if args.benchmark:
from neural_compressor.experimental import Benchmark, common
ds = SST2DataSet(args.data_dir, args.tokenizer_dir)
evaluator = Benchmark(args.config)
evaluator.model = common.Model(args.input_model)
evaluator.b_dataloader = common.DataLoader(ds, args.batch_size)
evaluator(args.mode)
if args.tune:
from neural_compressor.experimental import Quantization, common
ds = SST2DataSet(args.data_dir, args.tokenizer_dir)
quantizer = Quantization(args.config)
quantizer.model = common.Model(args.input_model)
quantizer.eval_dataloader = common.DataLoader(ds, args.batch_size)
quantizer.calib_dataloader = common.DataLoader(ds, args.batch_size)
q_model = quantizer.fit()
q_model.save(args.output_model)
if __name__ == '__main__':
main()
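# Example invocations (added for this excerpt; paths match the argparse defaults above and may
# need adjusting for a real setup):
#
#   python run_engine.py --tune --config=./bert_static.yaml \
#       --input_model=distilbert_base_uncased_sst2.onnx --output_model=./ir/
#   python run_engine.py --benchmark --mode=performance --batch_size=8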
| 38.026316 | 120 | 0.716609 |
49cf7cb76f095a47b81c234c8342c2dbc094e726
| 1,872 |
py
|
Python
|
tools/harness-automation/cases_R140/sed_9_2_8.py
|
tpmanley/openthread
|
bc02c6c05cf52884bc6cd9fad8dc8fc16364a147
|
[
"BSD-3-Clause"
] | 2 |
2018-08-24T05:14:27.000Z
|
2018-09-25T03:02:36.000Z
|
tools/harness-automation/cases_R140/sed_9_2_8.py
|
tpmanley/openthread
|
bc02c6c05cf52884bc6cd9fad8dc8fc16364a147
|
[
"BSD-3-Clause"
] | 4 |
2016-09-09T17:10:04.000Z
|
2016-09-29T05:18:09.000Z
|
tools/harness-automation/cases_R140/sed_9_2_8.py
|
tpmanley/openthread
|
bc02c6c05cf52884bc6cd9fad8dc8fc16364a147
|
[
"BSD-3-Clause"
] | 1 |
2020-10-22T16:33:36.000Z
|
2020-10-22T16:33:36.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from autothreadharness.harness_case import HarnessCase
class SED_9_2_8(HarnessCase):
role = HarnessCase.ROLE_SED
case = '9 2 8'
golden_devices_required = 2
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| 39.829787 | 77 | 0.769231 |
eb26f10337fd057e39d85b41734e9abc4be885ae
| 5,289 |
py
|
Python
|
source/tests/graph_test.py
|
staujd02/python-tsp
|
39047d17a8bd3f9bd2590ab40a0a3861646b76b9
|
[
"Apache-2.0"
] | null | null | null |
source/tests/graph_test.py
|
staujd02/python-tsp
|
39047d17a8bd3f9bd2590ab40a0a3861646b76b9
|
[
"Apache-2.0"
] | null | null | null |
source/tests/graph_test.py
|
staujd02/python-tsp
|
39047d17a8bd3f9bd2590ab40a0a3861646b76b9
|
[
"Apache-2.0"
] | null | null | null |
from array import array
from source.utilities.graphStringMuxer import GraphStringMuxer
import unittest
from source.utilities.solver import Solver
from source.utilities.graph import Graph
from source.utilities.graph import BranchingGraphError
from source.dataStructures import Vector
class Graph_test(unittest.TestCase):
V = {
"D->E": Vector('D', 'E', 5),
"A->D": Vector('A', 'D', 25),
"C->D": Vector('C', 'D', 25),
"E->D": Vector('E', 'D', 30),
"A->B": Vector('A', 'B', 50),
"B->D": Vector('B', 'D', 75),
"D->C": Vector('D', 'C', 75),
"E->C": Vector('E', 'C', 75),
"B->E": Vector('B', 'E', 100),
"D->B": Vector('D', 'B', 100),
"C->A": Vector('C', 'A', 125),
"A->C": Vector('A', 'C', 150),
"E->B": Vector('E', 'B', 150),
"C->B": Vector('C', 'B', 175),
"B->C": Vector('B', 'C', 200),
"B->A": Vector('B', 'A', 0),
"C->E": Vector('C', 'E', 0),
"D->A": Vector('D', 'A', 0),
"E->A": Vector('E', 'A', 0),
}
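    # Each Vector encodes a directed edge as (origin, destination, weight); these edges build the graphs exercised by the tests below.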
def test_a_graph_can_copy_itself(self):
V = self.V
graph = Graph(['A->B', 'B->D', 'C->B', 'D->A'], V)
self.assertTrue(graph is not graph.copy())
self.assertEqual(str(graph), str(graph.copy()))
self.assertEqual(graph.lastChange, graph.copy().lastChange)
def disabled_test_a_graph_can_fill_out_its_trajectory_register(self):
V = self.V
graph = Graph(['A->B', 'B->D', 'C->B', 'D->A'], V)
self.assertEqual(graph.trajectoryRegister, array('i', [1, 2, 0, 1]))
def test_a_graph_can_translate_an_ascii_key_to_an_index(self):
V = self.V
graph = Graph(['A->B', 'B->D', 'C->B', 'D->A'], V)
self.assertEqual(GraphStringMuxer.translate('A'), 0)
self.assertEqual(GraphStringMuxer.translate('C'), 2)
def test_a_graph_can_go_deeper(self):
V = self.V
graph = Graph(['A->B', 'B->D', 'C->B', 'D->A'], V)
# self.assertEqual(graph.trajectoryRegister, array('i', [1, 2, 0, 1]))
w = graph.getWeight()
newWeight = graph.goDeeper(V['D->B'], w)
self.assertEqual(newWeight, w + V['D->B'][2])
self.assertEqual(graph.graphData[3], 'D->B')
self.assertEqual(graph.lastChange, 'D->A')
# self.assertEqual(graph.trajectoryRegister, array('i', [0, 3, 0, 1]))
def test_a_graph_can_go_across(self):
V = self.V
vList = ['A->B', 'B->D', 'C->B', 'D->A', 'E->A']
graph = Graph(vList, V)
graph.replace(V['D->C'])
newWeight = graph.goAcross(V['E->C'], V['D->C'][2])
expectedNewWeight = V['E->C'][2]
self.assertEqual(graph.graphData[4], 'E->C')
self.assertEqual(graph.graphData[3], 'D->A')
self.assertEqual(graph.lastChange, 'E->A')
self.assertEqual(newWeight, expectedNewWeight)
def test_a_graph_returns_none_when_constructed_with_duplicate_origin_vectors(self):
V = self.V
caughtError = False
try:
Graph(['A->B', 'B->D', 'B->C', 'D->A'], V)
except BranchingGraphError:
caughtError = True
self.assertTrue(caughtError)
def test_a_graph_can_check_for_difficult_incorrectness(self):
V = self.V
vList = ['A->B', 'B->A', 'C->E', 'D->C', 'E->D']
graph = Graph(vList, V)
self.assertEqual(graph.isValid(), False)
def test_a_can_output_a_list_of_its_vectors(self):
V = self.V
vList = ['A->B', 'B->A', 'C->E', 'D->C', 'E->D']
graph = Graph(vList, V)
self.assertEqual(graph.toVectorListString(), "{<A->B:50><B->A:0><C->E:0><D->C:75><E->D:30>}")
def test_a_graph_can_check_for_incorrectness(self):
V = self.V
vList = ['A->B', 'B->D', 'C->B', 'D->A']
graph = Graph(vList, V)
self.assertEqual(graph.isValid(), False)
def test_a_graph_can_check_for_correctness(self):
V = self.V
vList = ['A->C', 'B->A', 'C->B']
graph = Graph(vList, V)
self.assertEqual(graph.isValid(), True)
def test_graph_has_a_weight(self):
V = self.V
vList = ['A->B', 'B->C', 'C->A']
graph = Graph(vList, V)
self.assertEqual(graph.getWeight(), 375)
def test_graph_can_output_a_sensible_string(self):
V = self.V
vList = ['A->B', 'B->C', 'C->A']
graph = Graph(vList, V)
self.assertEqual(str(graph), "(A->B->C->A): 375")
def test_graph_tracks_its_modifications(self):
V = self.V
vList = ['A->B', 'B->C', 'C->E', 'D->A', 'E->D']
graph = Graph(vList, V)
self.assertEqual(None, graph.lastChange)
graph.replace(V['D->C'])
self.assertEqual('D->A', str(graph.lastChange))
def test_integrator_can_integrate_modifications_into_a_graph(self):
V = self.V
vList = ['A->B', 'B->C', 'C->E', 'D->A', 'E->D']
newList = ['A->B', 'B->A', 'C->E', 'D->C', 'E->D']
graph = Graph(vList, V)
graph.replace(V['D->C'])
graph.replace(V['B->D'])
graph.replace(V['B->A'])
self.assertEqual(str(Graph(newList, V)), str(graph))
def vectorCompare(self, v1, v2):
self.assertEqual(str(v1), str(v2))
| 37.778571 | 101 | 0.536964 |
439e05a4da9dd2b08b9248018fcdf8a5f00c9188
| 1,309 |
py
|
Python
|
deploy-agent/deployd/download/download_helper.py
|
brennentsmith/teletraan
|
55d05c6352591ed847e1f9edf63c9483897f2187
|
[
"Apache-2.0"
] | 2,449 |
2016-02-11T23:53:25.000Z
|
2022-03-27T08:43:56.000Z
|
deploy-agent/deployd/download/download_helper.py
|
brennentsmith/teletraan
|
55d05c6352591ed847e1f9edf63c9483897f2187
|
[
"Apache-2.0"
] | 360 |
2016-01-21T23:52:39.000Z
|
2022-03-25T01:24:54.000Z
|
deploy-agent/deployd/download/download_helper.py
|
brennentsmith/teletraan
|
55d05c6352591ed847e1f9edf63c9483897f2187
|
[
"Apache-2.0"
] | 322 |
2016-01-21T16:50:59.000Z
|
2022-02-23T00:56:18.000Z
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import hashlib
import logging
log = logging.getLogger(__name__)
class DownloadHelper(object):
def __init__(self, url):
self._url = url
@staticmethod
def hash_file(file_path):
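        # Hash the file in 4 KB chunks so large downloads never need to be read into memory at once.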
sha = hashlib.sha1()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
sha.update(chunk)
return sha.hexdigest()
@staticmethod
def md5_file(file_path):
md5 = hashlib.md5()
with open(file_path, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
md5.update(chunk)
return md5.hexdigest()
    @abc.abstractmethod
def download(self, local_full_fn):
pass
| 28.456522 | 74 | 0.663102 |
f4ce0abd0d11c20ca4dfad78402642888f91641e
| 1,750 |
py
|
Python
|
watson_endpoint.py
|
jtizon001/ideahub
|
a2ff57192db22033089a8add423a2fe35dcf0101
|
[
"MIT"
] | null | null | null |
watson_endpoint.py
|
jtizon001/ideahub
|
a2ff57192db22033089a8add423a2fe35dcf0101
|
[
"MIT"
] | null | null | null |
watson_endpoint.py
|
jtizon001/ideahub
|
a2ff57192db22033089a8add423a2fe35dcf0101
|
[
"MIT"
] | 3 |
2018-03-08T15:28:07.000Z
|
2018-03-21T16:24:23.000Z
|
import json
from watson_developer_cloud import NaturalLanguageUnderstandingV1,WatsonException
from watson_developer_cloud.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions, SentimentOptions, EmotionOptions
from csv_manipulator import TweetCsv
import codecs
# Communicates with Watson NLU API
def post(tweet):
natural_language_understanding = NaturalLanguageUnderstandingV1(
username='912e28dd-f8ff-4ab5-9645-557ff6b7c597',
password='N7vecbexz7gQ',
version='2017-02-27')
response = natural_language_understanding.analyze(
text=tweet,
features=Features(
entities=EntitiesOptions(
emotion=True,
sentiment=True,
limit=5),
sentiment=SentimentOptions(
document=True),
keywords=KeywordsOptions(
emotion=True,
sentiment=True,
limit=5),
emotion=EmotionOptions(
document=True)))
return response
# Handler for stripping tweets, posting, and storing
def watson(path):
data = TweetCsv(path)
tweet = data.isolate_tweets()
print('Stripped tweet')
# resp = post(tweet)
try:
resp = post(tweet)
except (UnicodeDecodeError, WatsonException) as err:
resp='WATSON_ERROR_HEADER'
#print('Sent analysis complete: "%s"' % resp)
if resp!='WATSON_ERROR_HEADER':
store_file('output_got_sent', resp)
return resp
# Stores file in output files
def store_file(name, data):
output = codecs.open('./outputfiles/' + name + '.txt', "w+", "utf-8")
output.write(repr(data))
print('Sent analysis complete: "%s"' % json.dumps(data, sort_keys=True, indent=4))
output.close()
print('Sent analysis stored...')
| 31.25 | 145 | 0.684 |
baccd48017cbeb3e6aae0700b9104bb0a056b775
| 1,749 |
py
|
Python
|
explainaboard/utils/typing_utils.py
|
hwidjaja/ExplainaBoard
|
0e670ad2df9326eb6b4ad99ba435fd7b6806557a
|
[
"MIT"
] | null | null | null |
explainaboard/utils/typing_utils.py
|
hwidjaja/ExplainaBoard
|
0e670ad2df9326eb6b4ad99ba435fd7b6806557a
|
[
"MIT"
] | null | null | null |
explainaboard/utils/typing_utils.py
|
hwidjaja/ExplainaBoard
|
0e670ad2df9326eb6b4ad99ba435fd7b6806557a
|
[
"MIT"
] | null | null | null |
"""Generic functions to manipulate type hints."""
from __future__ import annotations
from collections.abc import Generator, Iterable
from typing import Any, Optional, TypeVar
T = TypeVar('T')
def unwrap(obj: Optional[T]) -> T:
'''Unwrap the ``Optional`` type hint.
    This function takes an object wrapped in ``Optional`` and returns it
    if the object is not ``None``. Otherwise this function raises ValueError.
:param obj: The object to unwrap.
:type obj: ``Optional[T]``
:return: ``obj`` itself.
:rtype: The underlying type ``T``.
:raises ValueError: ``obj`` is None.
'''
if obj is None:
raise ValueError('Attempted to unwrap None.')
return obj
def unwrap_generator(obj: Optional[Iterable[T]]) -> Generator[T, None, None]:
'''Unwrap the ``Optional`` ``Iterable``s and provides its generator.
This function takes an ``Iterable`` object wrapped by the ``Optional``, and provides
an iterator over the underlying object. If the object is ``None``, this function
yields nothing and returns immediately.
    If raising ``ValueError`` when ``None`` is preferred, use ``unwrap()`` instead.
:param obj: The object to unwrap.
:type obj: ``Optional[Iterable[T]]``
:return: A generator over the underlying object.
:rtype: ``Generator[T, None, None]``
'''
if obj is not None:
yield from obj
NarrowType = TypeVar("NarrowType")
def narrow(obj: Any, narrow_type: type[NarrowType]) -> NarrowType:
"""returns the object with the narrowed type or raises a TypeError
(obj: Any, new_type: type[T]) -> T"""
if isinstance(obj, narrow_type):
return obj
else:
raise TypeError(f"{obj} is expected to be {narrow_type}")
| 31.8 | 88 | 0.66781 |
cc41b8b81cbcfa8901e18363d778d033c4c77e16
| 5,621 |
py
|
Python
|
m_theory/m_theory_lib/tf_cexpm.py
|
rmitra/google-research
|
ddc22300c4cb3223654c9a981f892dc0f6286e35
|
[
"Apache-2.0"
] | 1 |
2020-03-05T09:34:44.000Z
|
2020-03-05T09:34:44.000Z
|
m_theory/m_theory_lib/tf_cexpm.py
|
robot-ai-machinelearning/google-research
|
88481d10a87947ffb9305dc7665682e008b27391
|
[
"Apache-2.0"
] | null | null | null |
m_theory/m_theory_lib/tf_cexpm.py
|
robot-ai-machinelearning/google-research
|
88481d10a87947ffb9305dc7665682e008b27391
|
[
"Apache-2.0"
] | 1 |
2020-03-05T09:24:01.000Z
|
2020-03-05T09:24:01.000Z
|
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Alternative complex matrix exponentiation.
Provides a basic complex matrix exponentiation function `cexpm` for TensorFlow.
Complex Matrix Exponentiation that allows holomorphic backpropagation.
We need this here because TensorFlow <=1.13's tf.linalg.expm() does not
support taking Hessians. (In newer versions, it does support taking
gradients.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
import operator
import tensorflow.compat.v1 as tf
def _get_taylor_strategy(n_max, eye, m, prod=operator.mul):
"""Finds out how to build x**N with low depth, given all the lower powers."""
depth_and_tensor_power_by_exponent = [None] * (n_max + 1)
depth_and_tensor_power_by_exponent[0] = (0, eye)
depth_and_tensor_power_by_exponent[1] = (0, m)
for n in range(2, n_max + 1):
best_depth, best_k = min(
(1 + max(depth_and_tensor_power_by_exponent[k][0],
depth_and_tensor_power_by_exponent[n - k][0]),
k) for k in range(1, n))
depth_and_tensor_power_by_exponent[n] = (
best_depth, prod(depth_and_tensor_power_by_exponent[best_k][1],
depth_and_tensor_power_by_exponent[n - best_k][1]))
return depth_and_tensor_power_by_exponent
def _complex_matmul(p, q):
"""Implements complex matrix multiplication using 'C = R + R' embedding."""
return tf.stack([
# {real1} * {real2} - {imag1} * {imag2}
tf.matmul(p[0, :, :], q[0, :, :]) - tf.matmul(p[1, :, :], q[1, :, :]),
# {real1} * {imag2} + {imag1} * {real2}
tf.matmul(p[0, :, :], q[1, :, :]) + tf.matmul(p[1, :, :], q[0, :, :])])
def tf_shallow_expm_taylor(t_m, n_max=40, name_scope='expm'):
"""Computes Taylor polynomial for matrix exponentiation via shallow graph."""
shape = t_m.shape.as_list()
dim = shape[-1]
if len(shape) == 3:
if shape[0] != 2 or shape[1] != shape[2]:
# Leading index must be for choosing between real/imaginary coefficients.
raise ValueError(
'Complex matrix must be shape [2, N, N], observed: {}'.format(shape))
eye = numpy.stack([numpy.eye(dim, dtype=numpy.float64),
numpy.zeros([dim, dim], dtype=numpy.float64)])
fn_product = _complex_matmul
else:
eye = numpy.eye(dim, dtype=numpy.float64)
fn_product = tf.matmul
#
factorials = [1] * (n_max + 1)
for n in range(2, n_max + 1):
factorials[n] = n * factorials[n - 1]
with tf.name_scope(name='', values=[t_m]) as scope:
factorials_factors = tf.constant([1.0 / v for v in factorials],
dtype=tf.float64)
taylor_strategy = _get_taylor_strategy(n_max,
tf.constant(eye),
t_m, prod=fn_product)
return tf.einsum('c,cimn->imn' if len(shape) == 3 else 'c,cmn->mn',
factorials_factors,
tf.stack([m for _, m in taylor_strategy]))
def _c64(x):
"""Wraps up a float64 constant-array as a TF constant."""
return tf.constant(x, tf.float64)
def _get_num_squarings(t_m):
"""Computes the number of squarings to use for exponentiating a tensor."""
shape = t_m.shape.as_list()
dim = shape[0]
t_l2 = (tf.einsum('iab,iab->', t_m, t_m) # Complex t_m.
if len(shape) == 3
else tf.einsum('ab,ab->', t_m, t_m)) # Real t_m.
return tf.maximum(
_c64(0.0), tf.math.ceil(tf.math.log(t_l2) / numpy.log(2)))
def _get_squaring_cascade(t_num_squarings, t_m, prod=operator.mul,
max_squarings=100):
"""Gets the TF graph cascade of squaring operations."""
def get_cascade_tail(t_m_squared_n_times, n):
if n == max_squarings:
return t_m_squared_n_times
def false_fn():
t_m_squared_n_plus_one_times = prod(t_m_squared_n_times,
t_m_squared_n_times)
return get_cascade_tail(t_m_squared_n_plus_one_times, n + 1)
#
return tf.cond(tf.equal(_c64(n), t_num_squarings),
true_fn=lambda: t_m_squared_n_times,
false_fn=false_fn)
return get_cascade_tail(t_m, 0)
def cexpm(t_m_complex, max_squarings=20, taylor_n_max=20, complex_arg=True):
"""Drop-in replacement for tf.linalg.expm(), optionally for complex arg."""
if complex_arg:
t_m = tf.stack([tf.math.real(t_m_complex), tf.math.imag(t_m_complex)])
else:
t_m = t_m_complex
fn_product = _complex_matmul if complex_arg else tf.matmul
t_num_squarings = tf.minimum(_c64(max_squarings),
_get_num_squarings(t_m))
t_m_scaled = t_m * tf.pow(_c64(0.5), t_num_squarings)
exp_t_m_scaled = tf_shallow_expm_taylor(t_m_scaled, n_max=taylor_n_max)
ret = _get_squaring_cascade(t_num_squarings, exp_t_m_scaled,
prod=fn_product,
max_squarings=max_squarings)
if complex_arg:
return tf.complex(ret[0], ret[1])
else:
return ret
| 39.865248 | 79 | 0.652731 |
f9e00895bf7301daacbeb80b6103c41960823bc8
| 3,667 |
py
|
Python
|
circuits/core/helpers.py
|
spaceone/circuits
|
ed6d5464f1f83034109ed3d23d126c715450cfd2
|
[
"MIT"
] | null | null | null |
circuits/core/helpers.py
|
spaceone/circuits
|
ed6d5464f1f83034109ed3d23d126c715450cfd2
|
[
"MIT"
] | null | null | null |
circuits/core/helpers.py
|
spaceone/circuits
|
ed6d5464f1f83034109ed3d23d126c715450cfd2
|
[
"MIT"
] | null | null | null |
"""
.. codeauthor: mnl
"""
from signal import SIGINT, SIGTERM
from sys import stderr
from threading import Event
from traceback import format_exception_only
from circuits.core.handlers import reprhandler
from .components import BaseComponent
from .handlers import handler
class FallBackGenerator(BaseComponent):
def __init__(self, *args, **kwargs):
super(FallBackGenerator, self).__init__(*args, **kwargs)
self._continue = Event()
@handler("generate_events", priority=-100)
def _on_generate_events(self, event):
"""
Fall back handler for the :class:`~.events.GenerateEvents` event.
        When the queue is empty, a GenerateEvents event is fired; here
        we sleep for as long as possible to avoid using extra cpu cycles.
A poller would override this with a higher priority handler.
e.g: ``@handler("generate_events", priority=0)``
and provide a different way to idle when the queue is empty.
"""
with event.lock:
if event.time_left == 0:
event.stop()
self._continue.clear()
if event.time_left > 0:
# If we get here, there is no component with work to be
# done and no new event. But some component has requested
# to be checked again after a certain timeout.
self._continue.wait(event.time_left)
# Either time is over or _continue has been set, which
# implies resume has been called, which means that
# reduce_time_left(0) has been called. So calling this
# here is OK in any case.
event.reduce_time_left(0)
event.stop()
while event.time_left < 0:
# If we get here, there was no work left to do when creating
# the GenerateEvents event and there is no other handler that
# is prepared to supply new events within a limited time. The
# application will continue only if some other Thread fires
# an event.
#
# Python ignores signals when waiting without timeout.
self._continue.wait(10000)
event.stop()
def resume(self):
"""
Implements the resume method as required from components that
handle :class:`~.events.GenerateEvents`.
"""
self._continue.set()
class FallBackExceptionHandler(BaseComponent):
"""
If there is no handler for error events in the component hierarchy, this
component's handler is added automatically. It simply prints
the error information on stderr.
"""
@handler("exception", channel="*")
def _on_exception(self, error_type, value, traceback,
handler=None, fevent=None):
s = []
if handler is None:
handler = ""
else:
handler = reprhandler(handler)
msg = "ERROR {0:s} ({1:s}) ({2:s}): {3:s}\n".format(
handler, repr(fevent), repr(error_type), repr(value)
)
s.append(msg)
s.append('Traceback (most recent call last):\n')
s.extend(traceback)
s.extend(format_exception_only(error_type, value))
s.append("\n")
stderr.write("".join(s))
class FallBackSignalHandler(BaseComponent):
"""
If there is no handler for signal events in the component hierarchy, this
component's handler is added automatically. It simply terminates the
system if the signal is SIGINT or SIGTERM.
"""
@handler("signal", channel="*")
def _on_signal(self, signo, stack):
if signo in [SIGINT, SIGTERM]:
raise SystemExit(0)
| 32.451327 | 77 | 0.623943 |
d145fdacde9ed462eb818d6d28af9a79a394ef78
| 135 |
py
|
Python
|
python_100/Level1/18.del_ele.py
|
relax-space/python-cy
|
eaf4650756e7ece5ec97894b65a7495b5c964eb3
|
[
"Apache-2.0"
] | 1 |
2020-04-27T03:31:23.000Z
|
2020-04-27T03:31:23.000Z
|
python_100/Level1/18.del_ele.py
|
relax-space/python-cy
|
eaf4650756e7ece5ec97894b65a7495b5c964eb3
|
[
"Apache-2.0"
] | 1 |
2020-04-14T23:55:19.000Z
|
2020-04-15T03:29:37.000Z
|
python_100/Level1/18.del_ele.py
|
relax-space/python-cy
|
eaf4650756e7ece5ec97894b65a7495b5c964eb3
|
[
"Apache-2.0"
] | null | null | null |
# 18. Python - the correct way to remove elements while iterating over a list
a = [1,2,3,4,5,6,7,8,9]
# Remove elements 5 and 8
for i in (a[:]):
if i == 5 or i == 8:
a.remove(i)
print(a)
| 13.5 | 26 | 0.503704 |
f4214a6c32f25c0ed4b0e47231f76902eb60f8c8
| 3,564 |
py
|
Python
|
glance/async_/utils.py
|
bwLehrpool/glance
|
d4119be0543bdaefe78fc11e16c3a01b55aa9e3a
|
[
"Apache-2.0"
] | null | null | null |
glance/async_/utils.py
|
bwLehrpool/glance
|
d4119be0543bdaefe78fc11e16c3a01b55aa9e3a
|
[
"Apache-2.0"
] | null | null | null |
glance/async_/utils.py
|
bwLehrpool/glance
|
d4119be0543bdaefe78fc11e16c3a01b55aa9e3a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import processutils as putils
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import units
from taskflow import task
from glance.i18n import _LW
LOG = logging.getLogger(__name__)
# NOTE(hemanthm): As reported in the bug #1449062, "qemu-img info" calls can
# be exploited to craft DoS attacks by providing malicious input. The process
# limits defined here are protections against such attacks. This essentially
# limits the CPU time and address space used by the process that executes
# "qemu-img info" command to 2 seconds and 1 GB respectively.
QEMU_IMG_PROC_LIMITS = putils.ProcessLimits(cpu_time=2,
address_space=1 * units.Gi)
class OptionalTask(task.Task):
def __init__(self, *args, **kwargs):
super(OptionalTask, self).__init__(*args, **kwargs)
self.execute = self._catch_all(self.execute)
def _catch_all(self, func):
# NOTE(flaper87): Read this comment before calling the MI6
# Here's the thing, there's no nice way to define "optional"
# tasks. That is, tasks whose failure shouldn't affect the execution
# of the flow. The only current "sane" way to do this, is by catching
# everything and logging. This seems harmless from a taskflow
# perspective but it is not. There are some issues related to this
# "workaround":
#
# - Task's states will shamelessly lie to us saying the task succeeded.
#
# - No revert procedure will be triggered, which means optional tasks,
# for now, mustn't cause any side-effects because they won't be able to
        #   clean them up. If these tasks depend on other tasks that do cause side
        #   effects, a task that cleans those side effects must be registered as
# well. For example, _ImportToFS, _MyDumbTask, _DeleteFromFS.
#
# - Ideally, optional tasks shouldn't `provide` new values unless they
# are part of an optional flow. Due to the decoration of the execute
# method, these tasks will need to define the provided methods at
# class level using `default_provides`.
#
#
# The taskflow team is working on improving this and on something that
# will provide the ability of defining optional tasks. For now, to lie
# ourselves we must.
#
# NOTE(harlowja): The upstream change that is hopefully going to make
# this easier/built-in is at: https://review.opendev.org/#/c/271116/
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
msg = (_LW("An optional task has failed, "
"the failure was: %s") %
encodeutils.exception_to_unicode(exc))
LOG.warning(msg)
return wrapper
| 44.55 | 79 | 0.667508 |
5ff3ad3bc04b36a0f6eb9d1dd40515b3c783c362
| 246 |
py
|
Python
|
python/min-val-get-positive-step-by-step-sum.py
|
alirezaghey/leetcode-solutions
|
676b71b4790c64d21af91dce02e97ee47e78d523
|
[
"MIT"
] | 3 |
2020-10-10T00:14:23.000Z
|
2022-03-02T21:16:29.000Z
|
python/min-val-get-positive-step-by-step-sum.py
|
alirezaghey/leetcode-solutions
|
676b71b4790c64d21af91dce02e97ee47e78d523
|
[
"MIT"
] | null | null | null |
python/min-val-get-positive-step-by-step-sum.py
|
alirezaghey/leetcode-solutions
|
676b71b4790c64d21af91dce02e97ee47e78d523
|
[
"MIT"
] | 1 |
2021-09-14T05:16:54.000Z
|
2021-09-14T05:16:54.000Z
|
class Solution:
def minStartValue(self, nums: List[int]) -> int:
best = 1
curr = 0
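        # Scan the running prefix sum; whenever it drops below 1, the start value must be at least 1 - curr to keep the step-by-step sum positive.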
for el in nums:
curr += el
if curr < 1:
best = max(best, 1 - curr)
return best
| 24.6 | 52 | 0.434959 |
4a0044af266cdd63c6169e023bf5c41c440685a0
| 8,091 |
py
|
Python
|
ICLR_2022/Cubic_10D/PI3NN/PI3NN_OOD.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 11 |
2021-11-08T20:38:50.000Z
|
2022-01-30T02:46:39.000Z
|
ICLR_2022/Cubic_10D/PI3NN/PI3NN_OOD.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 1 |
2022-01-13T19:46:32.000Z
|
2022-02-09T16:23:56.000Z
|
ICLR_2022/Cubic_10D/PI3NN/PI3NN_OOD.py
|
streeve/PI3NN
|
f7f08a195096e0388bb9230bc67c6acd6f41581a
|
[
"Apache-2.0"
] | 1 |
2021-12-17T18:38:26.000Z
|
2021-12-17T18:38:26.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch.optim as optim
# from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from scipy import stats
# torch.set_default_tensor_type(torch.DoubleTensor)
torch.set_default_tensor_type(torch.FloatTensor)
# device = 'cuda' if torch.cuda.is_available() else 'cpu'
device = 'cpu'
print('device', device)
# ==== fix random seed for reproducibility =====
np.random.seed(12345)
torch.manual_seed(12345)
#------------------------------------------------------
# ------- Define the three NNs ------------
#------------------------------------------------------
# ==== define the function =====
def target_fun(x):
y = x**3
y = (torch.sum(y, dim=1, keepdim=True)/10.0) + 1.0*torch.randn(x.size(0), device=device).unsqueeze(1)
return y
# ==== define the mean network =====
class UQ_Net_mean(nn.Module):
def __init__(self, nd):
super(UQ_Net_mean, self).__init__()
self.fc1 = nn.Linear(nd, 200)
self.fc2 = nn.Linear(200, 1)
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = self.fc2(x)
return x
def UQ_loss(self, x, output, ydata):
loss = torch.mean((output[:, 0]-ydata[:, 0])**2)
return loss
# ==== define the upper and lower bound network =====
class UQ_Net_std(nn.Module):
def __init__(self, nd):
super(UQ_Net_std, self).__init__()
self.fc1 = nn.Linear(nd, 200)
self.fc2 = nn.Linear(200, 1)
# self.fc2.bias = torch.nn.Parameter(torch.tensor([25.0])) ## assign a large bias value of the output layer for OOD identification
def forward(self, x):
x = torch.tanh(self.fc1(x))
x = torch.sqrt(torch.square(self.fc2(x)) + 0.1)
return x
def UQ_loss(self, x, output, ydata):
loss = torch.mean((output[:, 0] - ydata[:, 0])**2)
return loss
#------------------------------------------------------
# ------- Generate the data ------------
#------------------------------------------------------
Npar = 10
Ntrain = 5000
Nout = 1
xtrain = (torch.randn(Ntrain, Npar)*1.0).to(device)
ytrain = target_fun(xtrain).to(device)
# normalize data
x_mean = torch.mean(xtrain, axis=0)
x_std = torch.std(xtrain, axis=0)
xtrain_normal = (xtrain - x_mean)/x_std
y_mean = torch.mean(ytrain, axis=0)
y_std = torch.std(ytrain, axis=0)
ytrain_normal = (ytrain - y_mean)/y_std
Nvalid = 1000
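# Validation inputs are shifted by +2 relative to the training distribution, so they act as out-of-distribution samples.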
xvalid = (torch.randn(Nvalid, Npar)+2.0).to(device)
# xvalid[:,1] = xvalid[:,1] + 4.0
yvalid = target_fun(xvalid).to(device)
xvalid_normal = (xvalid - x_mean) / x_std
yvalid_normal = (yvalid - y_mean) / y_std
#------------------------------------------------------
# ------- Train the three NNs ------------
#------------------------------------------------------
criterion = nn.MSELoss()
# ====== Train the mean network for estimating mean
net = UQ_Net_mean(Npar).to(device)
net.zero_grad()
optimizer = optim.SGD(net.parameters(), lr=0.01)
Max_iter = 3000
fig = plt.figure(figsize=(6, 6))
ax = fig.add_axes()
for i in range(Max_iter):
optimizer.zero_grad()
output = net(xtrain_normal)
loss = criterion(output, ytrain_normal)
if i % 1000 == 0:
print(i, loss)
loss.backward()
optimizer.step()
## ==== Calculate the difference to get training data of U and V network
diff = (ytrain_normal - net(xtrain_normal)).detach()
mask = diff > 0
# print(mask.size())
y_up_data = diff[diff > 0].unsqueeze(1)
# x_up_data = xtrain_normal[diff>0].unsqueeze(1)
x_up_data = xtrain_normal[mask[:, 0], :]#.unsqueeze(1)
mask = diff < 0
y_down_data = -1.0 * diff[diff < 0].unsqueeze(1)
x_down_data = xtrain_normal[mask[:, 0], :]#.unsqueeze(1)
# ====== Train the U and V network for estimating upper and lower bound ====
net_up = UQ_Net_std(Npar).to(device)
net_up.zero_grad()
optimizer = optim.SGD(net_up.parameters(), lr=0.01)
for i in range(Max_iter):
optimizer.zero_grad()
output = net_up(x_up_data)
loss = criterion(output, y_up_data)
if torch.isnan(loss):
print(output, y_up_data)
exit()
if i % 1000 == 0:
print(i, loss)
loss.backward()
optimizer.step()
net_down = UQ_Net_std(Npar).to(device)
net_down.zero_grad()
optimizer = optim.SGD(net_down.parameters(), lr=0.01)
for i in range(Max_iter):
optimizer.zero_grad()
output = net_down(x_down_data)
# loss = net_up.UQ_loss(x_down_data, output, y_down_data)
loss = criterion(output, y_down_data)
if torch.isnan(loss):
print(output, y_down_data)
exit()
if i % 1000 == 0:
print(i, loss)
loss.backward()
optimizer.step()
#--------------------------------------------------------------------
# ------- Root-finding to determine alpha and beta ------------
#--------------------------------------------------------------------
quantile = 0.9
num_outlier = int(Ntrain * (1-quantile)/2)
output = net(xtrain_normal)
output_up = net_up(xtrain_normal)
output_down = net_down(xtrain_normal)
##===== find alpha =======
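# Bisection on the upper-bound multiplier: find c_up such that the number of training points above mean + c_up * upper_width equals num_outlier.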
c_up0 = 0.0
c_up1 = 200.0
f0 = (ytrain_normal >= output + c_up0 * output_up).sum() - num_outlier
f1 = (ytrain_normal >= output + c_up1 * output_up).sum() - num_outlier
n_iter = 1000
iter = 0
while iter <= n_iter and f0*f1<0: ##f0 != 0 and f1 != 0:
c_up2 = (c_up0 + c_up1)/2.0
f2 = (ytrain_normal >= output + c_up2 * output_up).sum() - num_outlier
if f2 == 0:
break
elif f2 > 0:
c_up0 = c_up2
f0 = f2
else:
c_up1 = c_up2
f1 = f2
    # print('{}, f0: {}, f1: {}, f2: {}'.format(iter, f0, f1, f2))
    iter += 1  # advance the counter so the n_iter cap can terminate the search
c_up = c_up2
##===== find beta =======
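# The same bisection applied to the lower-bound multiplier c_down.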
c_down0 = 0.0
c_down1 = 200.0
f0 = (ytrain_normal <= output - c_down0 * output_down).sum() - num_outlier
f1 = (ytrain_normal <= output - c_down1 * output_down).sum() - num_outlier
n_iter = 1000
iter = 0
while iter <= n_iter and f0*f1<0: ##f0 != 0 and f1 != 0:
c_down2 = (c_down0 + c_down1)/2.0
f2 = (ytrain_normal <= output - c_down2 * output_down).sum() - num_outlier
if f2 == 0:
break
elif f2 > 0:
c_down0 = c_down2
f0 = f2
else:
c_down1 = c_down2
f1 = f2
    # print('{}, f0: {}, f1: {}, f2: {}'.format(iter, f0, f1, f2))
    iter += 1  # advance the counter so the n_iter cap can terminate the search
c_down = c_down2
print('optimal alpha and beta: ', c_up, c_down)
#--------------------------------------------------------------------
# ------- Save and analysis results ------------
#--------------------------------------------------------------------
##--- 1. run PIVEN, QD, SQR, DER for this 10-Cubic function;
##--- 2. for each method, save two files 'PIW_train.dat', 'PIW_test.dat'
##--- 3. Calculate confidence score for the flight delay data.
output = net(xvalid_normal)
output_up = net_up(xvalid_normal)
output_down = net_down(xvalid_normal)
PI1 = net_up(xtrain_normal) * c_up + net_down(xtrain_normal) * c_down
MPIW_array_train = (PI1 * y_std).detach().numpy()
MPIW_array_test = (output_up * c_up * y_std + c_down * output_down * y_std).detach().numpy()
MPIW_array_train = MPIW_array_train[~np.isnan(MPIW_array_train)]
MPIW_array_test = MPIW_array_test[~np.isnan(MPIW_array_test)]
print(np.shape(MPIW_array_train), np.shape(MPIW_array_test))
np.savetxt('PI3NN_MPIW_train.dat', MPIW_array_train)
np.savetxt('PI3NN_MPIW_test.dat', MPIW_array_test)
kde_train = stats.gaussian_kde(MPIW_array_train)
kde_test = stats.gaussian_kde(MPIW_array_test)
x1 = np.linspace(MPIW_array_train.min(), MPIW_array_train.max(), 100)
p1 = kde_train(x1)
x2 = np.linspace(MPIW_array_test.min(), MPIW_array_test.max(), 100)
p2 = kde_test(x2)
plt.plot(x1,p1, label='train')
plt.plot(x2,p2, label='test')
plt.legend()
plt.savefig('PI3NN_cubic10D_bias.png')
plt.show()
# print('P1 (train) mean: {}, STD: {}'.format(np.mean(p1), np.std(p1)))
# print('P2 (test) mean: {}, STD: {}'.format(np.mean(p2), np.std(p2)))
###------- Option I: calculate confidence interval
conf_score = kde_train(MPIW_array_test)/p1.max()
print(np.mean(conf_score), np.std(conf_score))
| 26.880399 | 138 | 0.597701 |
47d1d091c9e3724e32d9327734d800975a6349dc
| 3,932 |
py
|
Python
|
tests/modules/encounters/resources/test_delete_encounter.py
|
WildMeOrg/houston
|
8102229421388e44234c07ee6cb73bf705b6fba0
|
[
"Apache-2.0"
] | 6 |
2021-04-06T19:50:52.000Z
|
2022-01-19T17:42:33.000Z
|
tests/modules/encounters/resources/test_delete_encounter.py
|
WildMeOrg/houston
|
8102229421388e44234c07ee6cb73bf705b6fba0
|
[
"Apache-2.0"
] | 491 |
2021-01-20T01:10:00.000Z
|
2022-03-31T19:30:48.000Z
|
tests/modules/encounters/resources/test_delete_encounter.py
|
WildMeOrg/houston
|
8102229421388e44234c07ee6cb73bf705b6fba0
|
[
"Apache-2.0"
] | 2 |
2021-03-12T02:33:55.000Z
|
2021-03-16T20:18:43.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
from tests.modules.sightings.resources import utils as sighting_utils
from tests.modules.encounters.resources import utils as enc_utils
from tests.modules.individuals.resources import utils as indiv_utils
from tests import utils as test_utils
import datetime
import pytest
from tests.utils import module_unavailable
timestamp = datetime.datetime.now().isoformat() + 'Z'
@pytest.mark.skipif(module_unavailable('encounters'), reason='Encounters module disabled')
def test_delete_method(
db, flask_app_client, researcher_1, test_root, staff_user, request
):
from app.modules.sightings.models import Sighting
# we should end up with these same counts (which _should be_ all zeros!)
orig_ct = test_utils.all_count(db)
data_in = {
'startTime': timestamp,
'locationId': 'test_delete_method',
'encounters': [
{},
{'locationId': 'test2'},
],
}
uuids = sighting_utils.create_sighting(
flask_app_client, researcher_1, request, test_root, data_in
)
sighting_id = uuids['sighting']
sighting = Sighting.query.get(sighting_id)
assert sighting is not None
assert len(uuids['encounters']) == 2
enc0_id = uuids['encounters'][0]
enc1_id = uuids['encounters'][1]
assert enc0_id is not None
assert enc1_id is not None
# assign indiv to both encounters to test cascade-delete of individual as well
indiv_enc_json = {'encounters': [{'id': enc0_id}, {'id': enc1_id}]}
response = indiv_utils.create_individual(
flask_app_client,
staff_user,
data_in=indiv_enc_json,
)
individual_guid = response.json['result']['id']
assert individual_guid is not None
ct = test_utils.all_count(db)
assert ct['Sighting'] == orig_ct['Sighting'] + 1 # one more sighting
assert ct['Encounter'] == orig_ct['Encounter'] + 2 # two more encounters
assert ct['Individual'] == orig_ct['Individual'] + 1 # one more individual
# this should be ok, cuz one enc remains (no cascade effects)
enc_utils.delete_encounter(flask_app_client, staff_user, enc0_id)
ct = test_utils.all_count(db)
assert ct['Encounter'] == orig_ct['Encounter'] + 1
assert (
ct['Individual'] == orig_ct['Individual'] + 1
) # just to confirm indiv is still there
# test that sighting is correct, with single encounter remaining
get_resp = sighting_utils.read_sighting(flask_app_client, researcher_1, sighting_id)
assert len(get_resp.json['encounters']) == 1
# but this should then fail, cuz its the last enc and will take the sighting with it
response = enc_utils.delete_encounter(
flask_app_client, staff_user, enc1_id, expected_status_code=400
)
ct = test_utils.all_count(db)
assert ct['Encounter'] == orig_ct['Encounter'] + 1
assert response.json['edm_status_code'] == 604
# this will fail cuz it *only* allows sighting-cascade and we need individual also
headers = (('x-allow-delete-cascade-sighting', True),)
response = enc_utils.delete_encounter(
flask_app_client, staff_user, enc1_id, headers=headers, expected_status_code=400
)
assert response.json['edm_status_code'] == 605
# now this should work but take the sighting and individual with it as well
headers = (
('x-allow-delete-cascade-individual', True),
('x-allow-delete-cascade-sighting', True),
)
response = enc_utils.delete_encounter(
flask_app_client, staff_user, enc1_id, headers=headers
)
assert (
response.headers['x-deletedSighting-guid'] == sighting_id
) # header tells us sighting cascade-deleted
ct = test_utils.all_count(db) # back where we started
assert ct['Sighting'] == orig_ct['Sighting']
assert ct['Encounter'] == orig_ct['Encounter']
assert ct['Individual'] == orig_ct['Individual']
| 37.807692 | 90 | 0.694049 |
34e67e146e674aae93d1baaf31e8c124c840eaef
| 803 |
py
|
Python
|
python_script/pokemon_fact_writer.py
|
ReiiYuki/PokemonWeakDetector
|
ecf160ec660f518b150d0d09be8a5031c4f0d0aa
|
[
"MIT"
] | 4 |
2016-10-18T15:15:50.000Z
|
2022-01-13T23:42:37.000Z
|
python_script/pokemon_fact_writer.py
|
ReiiYuki/PokemonWeakDetector
|
ecf160ec660f518b150d0d09be8a5031c4f0d0aa
|
[
"MIT"
] | 1 |
2016-10-14T10:46:25.000Z
|
2016-10-14T10:46:25.000Z
|
python_script/pokemon_fact_writer.py
|
ReiiYuki/PokemonWeakDetector
|
ecf160ec660f518b150d0d09be8a5031c4f0d0aa
|
[
"MIT"
] | 2 |
2018-11-27T02:18:29.000Z
|
2019-11-25T20:10:41.000Z
|
import json
with open('pokemon_data.json') as poke_data:
d = json.load(poke_data)
pokemon_list = []
type_list = []
move_list = []
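# Build Prolog facts pokemon/1, have/2 and type/2 from the JSON records.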
for i in d :
name = i['name']
pokemon_fact = 'pokemon('+name.lower()+').'
pokemon_list.append(pokemon_fact)
moves = i['moves']
for m in moves :
move = m['name']
move_list.append('have('+name.lower()+','+move.lower()+').')
types = i['types']
for t in types :
type = t['name']
type_list.append('type('+name.lower()+','+type.lower()+').')
out = open('pokefactrule.pl','w+')
for i in pokemon_list :
out.write(i+'\n')
for i in type_list :
out.write(i+'\n')
for i in move_list :
out.write(i+'\n')
out.close()
| 28.678571 | 72 | 0.511831 |