blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8f1135a4fe1bd810fef43a3c51e1e6e339127726 | 38558ac2e78837e7f975364f03a1f55fb02103af | /OBJECT ORIENTED PROGRAMMING IN PYTHON/encap1.py | a4199e1abcb8eca386472268a776fb523aea2f59 | [] | no_license | SOURADEEP-DONNY/WORKING-WITH-PYTHON | a0bc2ff5ddab1b25563927c8f361c6512683d6ff | 5198d14f0711a3ba7f2fe8bac61d6404c20ea40c | refs/heads/master | 2023-07-14T04:49:08.399519 | 2021-08-29T15:22:33 | 2021-08-29T15:22:33 | 270,723,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | class car:
def __init__(self,speed,color):
self.speed=speed
self.color=color
def set_speed(self,value):
self.speed=value
def get_speed(self):
return self.speed
maruti=car(200,'blue')
bmw=car(500,'black')
TATA_safari=car(450,'maroon')
print(TATA_safari.get_speed())
print(bmw.get_speed())
print(maruti.get_speed())
TATA_safari.speed=900
bmw.speed="NULL"
print(TATA_safari.get_speed())
print(bmw.get_speed())
print(maruti.get_speed())
print("\n\n")
# NOW DATA GETTING PROTECTION
class car:
def __init__(self,speed,color):
self.__speed=speed # Making it Private
self.__color=color # Making it Private
def set_speed(self,value):
self.__speed=value
def get_speed(self):
return self.__speed
maruti=car(200,'blue')
bmw=car(500,'black')
TATA_safari=car(450,'maroon')
print(TATA_safari.get_speed())
print(bmw.get_speed())
print(maruti.get_speed())
TATA_safari.speed=900
bmw.speed="NULL"
print(TATA_safari.get_speed())
print(bmw.get_speed())
print(maruti.get_speed())
#clearly no security
| [
"[email protected]"
] | |
3b5d898fbb6e2df88a271a412c2870043225234e | b7843e20aec7f6f60934ce2ea3ce691d4e9046cf | /[项目二] seiya/seiya/seiya/analysis/job.py | ee18b83280aac4a91c173fca71ac2e3cab0832bb | [] | no_license | redsnowc/challenge | a54a80012beed5ffe264cb09b01e1c8aaeef5384 | c097af045228c51290eae03428b6c6135bd0a5d2 | refs/heads/master | 2020-05-03T08:50:43.085943 | 2019-10-30T07:44:00 | 2019-10-30T07:44:00 | 178,537,982 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | from seiya.db.job import Job, session
from sqlalchemy.sql import func
def analysis_top10_city():
city_list = session.query(
Job.city, func.count(Job.city)).group_by(
Job.city).order_by(func.count(Job.city).desc()).all()
data = [{'name': i[0], 'amount': i[1]} for i in city_list][:10]
return data
def analysis_top10_wage():
salary_list = session.query(Job.city, func.avg(
(Job.salary_upper + Job.salary_lower) / 2)).group_by(
Job.city).order_by(func.avg(
(Job.salary_upper + Job.salary_lower) / 2).desc()).all()
data = [{'name' : i[0], 'salary': float(i[1])}
for i in salary_list][:10]
return data
| [
"[email protected]"
] | |
654adeff233277840881df8e58bf441d22ccbea3 | 7807bb168d52a2f292e81a5ffcfd00f16dacffed | /source/scripts/wordindex.py | c94239358b2c0de054c9319e5c58458516fdc1b6 | [
"MIT"
] | permissive | paulscottrobson/rpl-c | a248dbf1d3c2b4481fd8371d7faa0827596e1e03 | 2ee72fd0a3381c8b57e7b3af1080733f76e4781d | refs/heads/master | 2020-12-05T11:10:45.403374 | 2020-01-24T16:34:06 | 2020-01-24T16:34:06 | 232,091,109 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,838 | py | # *****************************************************************************
# *****************************************************************************
#
# Name : wordindex.py
# Purpose : Allocate each keyword a specific, final identifier ID.
# Author : Paul Robson ([email protected])
# Date : 15th January 2020
#
# *****************************************************************************
# *****************************************************************************
import re
# *****************************************************************************
#
# Create a hash mapping word to ID.
#
# *****************************************************************************
class WordIndex(object):
def __init__(self):
if WordIndex.INDEX is None:
x = {}
elements = self.raw().split("\n")
for e in elements:
m = re.match("^\\s*(\d+)\\s*\\:\\:\\:\\s*(.*)$",e)
assert m is not None,"Bad line "+e
assert m.group(2).strip() not in x,"Duplicate "+e
x[m.group(2).strip()] = int(m.group(1))
WordIndex.INDEX = x
def get(self):
return WordIndex.INDEX
# *****************************************************************************
#
# RPL-C's word index. This is manually maintained and does not need
# to be ordered. It does need to be consistent.
#
# *****************************************************************************
def raw(self):
return """
0 ::: !
1 ::: $$!handler
2 ::: $$&handler
3 ::: $$@handler
4 ::: $$call
5 ::: $$comment
6 ::: $$define
7 ::: $$literal
8 ::: $$nextline
9 ::: $$string
10 ::: *
11 ::: +
12 ::: +!
13 ::: -
14 ::: -1
15 ::: ..
16 ::: /
17 ::: 0
18 ::: 0<
19 ::: 0=
20 ::: 1
21 ::: 1+
22 ::: 1-
23 ::: 10
24 ::: 100
25 ::: 1024
26 ::: 127
27 ::: 128
28 ::: 15
29 ::: 16
30 ::: 16*
31 ::: 16/
32 ::: 2
33 ::: 2*
34 ::: 2+
35 ::: 2-
36 ::: 2/
37 ::: 24
38 ::: 255
39 ::: 256
40 ::: 256*
41 ::: 256/
42 ::: 3
43 ::: 32
44 ::: 32767
45 ::: 32768
46 ::: 4
47 ::: 4*
48 ::: 4/
49 ::: 4096
50 ::: 5
51 ::: 512
52 ::: 63
53 ::: 64
54 ::: 8
55 ::: 8*
56 ::: 8/
57 ::: ;
58 ::: <
59 ::: <=
60 ::: <>
61 ::: =
62 ::: >
63 ::: >=
64 ::: ?dup
65 ::: @
66 ::: abs
67 ::: alloc
68 ::: and
69 ::: assert
70 ::: bswap
71 ::: c!
72 ::: c@
73 ::: clr
74 ::: drop
75 ::: dup
76 ::: else
77 ::: end
78 ::: endif
79 ::: for
80 ::: if
81 ::: index
82 ::: list
83 ::: max
84 ::: min
85 ::: mod
86 ::: negate
87 ::: new
88 ::: next
89 ::: nip
90 ::: not
91 ::: or
92 ::: over
93 ::: repeat
94 ::: rnd
95 ::: rot
96 ::: run
97 ::: sgn
98 ::: stop
99 ::: swap
100 ::: sys
101 ::: to.string
102 ::: until
103 ::: vlist
104 ::: xbreak
105 ::: xdump
106 ::: xor
107 ::: save
108 ::: load
109 ::: $$index
110 ::: old
111 ::: $$hexliteral
112 ::: fast
113 ::: slow
""".strip().upper()
WordIndex.INDEX = None
if __name__ == "__main__":
print(WordIndex().get()) | [
"[email protected]"
] | |
95ce651702a1657e575cdcc81117c4f133541ea6 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/116/usersdata/207/26239/submittedfiles/al1.py | 8de58f1eaa677aefe83a1c90ace636c4dc4d4889 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | from __future__ import division
r=float(input(digite um valor para raio de uma lata:'))
a=float(input('digite um valor para altura de uma lata:'))
v=(3*14159*r*r*a)
| [
"[email protected]"
] | |
971fc3a8143a232c52ec212094e29eb5b2ca0c29 | bfc25f1ad7bfe061b57cfab82aba9d0af1453491 | /data/external/repositories_2to3/206102/kaggle-lmgpip-master/create_datasets.py | 164f42a999dbbc6ec8339da8be823a36d9f961c4 | [
"MIT"
] | permissive | Keesiu/meta-kaggle | 77d134620ebce530d183467202cf45639d9c6ff2 | 87de739aba2399fd31072ee81b391f9b7a63f540 | refs/heads/master | 2020-03-28T00:23:10.584151 | 2018-12-20T19:09:50 | 2018-12-20T19:09:50 | 147,406,338 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,393 | py | import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.decomposition import *
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
import xgboost as xgb
from sklearn.feature_extraction import DictVectorizer
# LabelEncoder
train = pd.read_csv('../input/train.csv', index_col=None)
test = pd.read_csv('../input/test.csv', index_col=None)
train_cols = train.columns
test_cols = test.columns
labels = np.array(train.Hazard).ravel()
train_ids = np.array(train.Id).ravel()
test_ids = np.array(test.Id).ravel()
train.drop('Id', axis=1, inplace=True)
train.drop('Hazard', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)
train = np.array(train)
test = np.array(test)
for i in range(train.shape[1]):
lbl = preprocessing.LabelEncoder()
lbl.fit(list(train[:, i]) + list(test[:, i]))
train[:, i] = lbl.transform(train[:, i])
test[:, i] = lbl.transform(test[:, i])
train = np.column_stack((train_ids, labels, train))
test = np.column_stack((test_ids, test))
train = pd.DataFrame(train, columns=train_cols)
test = pd.DataFrame(test, columns=test_cols)
train['Id'] = train['Id'].astype(int)
train['Hazard'] = train['Hazard'].astype(int)
test['Id'] = test['Id'].astype(int)
train.to_csv('../input/train2.csv', index=None)
test.to_csv('../input/test2.csv', index=None)
# DictVectorizer
train = pd.read_csv('../input/train.csv', index_col=None)
test = pd.read_csv('../input/test.csv', index_col=None)
train_cols = train.columns
test_cols = test.columns
labels = np.array(train.Hazard).ravel().astype(int)
train_ids = np.array(train.Id).ravel().astype(int)
test_ids = np.array(test.Id).ravel().astype(int)
train.drop('Id', axis=1, inplace=True)
train.drop('Hazard', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)
train = list(train.T.reset_index(drop=True).to_dict().values())
test = list(test.T.reset_index(drop=True).to_dict().values())
vec = DictVectorizer(sparse=False)
train = vec.fit_transform(train)
test = vec.transform(test)
train = np.column_stack((train_ids, labels, train))
test = np.column_stack((test_ids, test))
train = pd.DataFrame(train, columns=['Id', 'Hazard'] + vec.get_feature_names())
test = pd.DataFrame(test, columns=['Id'] + vec.get_feature_names())
train['Id'] = train['Id'].astype(int)
train['Hazard'] = train['Hazard'].astype(int)
test['Id'] = test['Id'].astype(int)
train.to_csv('../input/train3.csv', index=None)
test.to_csv('../input/test3.csv', index=None)
# Factors to hazard mean
train = pd.read_csv('../input/train.csv', index_col=None)
test = pd.read_csv('../input/test.csv', index_col=None)
train_cols = train.columns
test_cols = test.columns
labels = train.Hazard.astype(int)
train_ids = train.Id.astype(int)
test_ids = test.Id.astype(int)
train.drop('Id', axis=1, inplace=True)
test.drop('Id', axis=1, inplace=True)
for feat in train.select_dtypes(include=['object']).columns:
m = train.groupby([feat])['Hazard'].mean()
train[feat].replace(m, inplace=True)
test[feat].replace(m, inplace=True)
train = pd.concat((train_ids, train), axis=1)
test = pd.concat((test_ids, test), axis=1)
train.to_csv('../input/train4.csv', index=None)
test.to_csv('../input/test4.csv', index=None)
| [
"[email protected]"
] | |
8bfb2b7badb4bc236ae0884aa55aee8070aeb958 | 59421a289ad349975b595de99276ee6b66a01a0e | /torch/_prims/nvfuser_prims.py | f37a21459e0cdd27cd5dea2436898ef9f094be8b | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | kevinstephano/pytorch | 88fef28c78b26e558abb232df4d2e02042369ca1 | 59001d05b406bb00d5838f04ca972180e1a4946e | refs/heads/master | 2022-11-22T23:40:10.851699 | 2022-10-29T20:36:20 | 2022-10-29T20:36:20 | 468,196,571 | 0 | 0 | NOASSERTION | 2022-03-10T04:45:25 | 2022-03-10T04:45:25 | null | UTF-8 | Python | false | false | 16,892 | py | # Module for defining "primitive" operations executable by the nvFuser. This
# list exists to decouple main set of primitives from the ones that provide a
# lowering of the op to nvFuser’s Python interface. Mostly torch.ops.nvprims is
# a subset of the primitives in torch.ops.prims, but some additional primitives
# can be added in the future for the corresponding higher-level torch/aten
# functions.
from typing import Any, Dict, Optional
import torch
from torch._prims_common import (
DimsSequenceType,
ELEMENTWISE_TYPE_PROMOTION_KIND,
getnvFuserDtype,
make_contiguous_strides_for,
ShapeType,
TensorLikeType,
)
from torch._prims_common.wrappers import (
backwards_not_supported,
elementwise_type_promotion_wrapper,
)
nvprim_namespace = "nvprims"
nvprim = torch.library.Library(nvprim_namespace, "DEF")
nvprim_impl = torch.library.Library(
nvprim_namespace, "IMPL", "CompositeExplicitAutograd"
)
nvprim_implicit_impl = torch.library.Library(
nvprim_namespace, "IMPL", "CompositeImplicitAutograd"
)
nvprim_autograd_impl = torch.library.Library(nvprim_namespace, "IMPL", "Autograd")
nvprim_meta_impl = torch.library.Library(nvprim_namespace, "IMPL", "Meta")
nvprim_names = [
"abs",
"acos",
"asin",
"atan",
"atanh",
"cos",
"cosh",
"clone",
"bitwise_not",
"ceil",
"erf",
"erfc",
"exp",
"expm1",
"floor",
"imag",
"isfinite",
"lgamma",
"log",
"log1p",
"log2",
"log10",
"real",
"reciprocal",
"neg",
"round",
"rsqrt",
"sign",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
"transpose",
"trunc",
"add",
"atan2",
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"div",
"eq",
"fmod",
"ge",
"gt",
"le",
"lt",
"mul",
"ne",
"pow",
"remainder",
"sub",
"squeeze",
"view_of",
"broadcast_in_dim",
"where",
"convert_element_type",
"sum",
"var",
"amax",
"amin",
]
_nvfuser_impls: Dict[str, Any] = {}
_nvfuser_unary_ops = {
"abs",
"acos",
"asin",
"atan",
"atanh",
"cos",
"cosh",
"bitwise_not",
"ceil",
"erf",
"erfc",
"exp",
"expm1",
"floor",
"imag",
"isfinite",
"lgamma",
"log",
"log1p",
"log2",
"log10",
"reciprocal",
"neg",
"real",
"round",
"rsqrt",
"sign",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
"trunc",
}
def _assert_nvfuser_op_exists(fname: str):
try:
from torch._C._nvfuser import FusionDefinition as fd # type: ignore[import]
assert getattr(fd.Operators, fname)
except ImportError:
# Not all PyTorch builds have nvfuser
pass
for fname in _nvfuser_unary_ops:
exec(
f"""
# Ensure that the nvfuser implementation exists
_assert_nvfuser_op_exists("{fname}")
def _{fname}_nvfuser(fd, a):
return fd.ops.{fname}(a) # type: ignore[attr-defined]
_nvfuser_impls["{fname}"] = _{fname}_nvfuser
"""
)
_nvfuser_binary_ops = {
"add",
"atan2",
"bitwise_and",
"bitwise_or",
"bitwise_xor",
"div",
"eq",
"fmod",
"ge",
"gt",
"le",
"lt",
"mul",
"ne",
"pow",
"remainder",
"sub",
}
for fname in _nvfuser_binary_ops:
exec(
f"""
# Ensure that the nvfuser implementation exists
_assert_nvfuser_op_exists("{fname}")
def _{fname}_nvfuser(fd, a, b):
return fd.ops.{fname}(a, b) # type: ignore[attr-defined]
_nvfuser_impls["{fname}"] = _{fname}_nvfuser
"""
)
_nvfuser_ternary_ops = {
"where",
}
for fname in _nvfuser_ternary_ops:
exec(
f"""
# Ensure that the nvfuser implementation exists
_assert_nvfuser_op_exists("{fname}")
def _{fname}_nvfuser(fd, a, b, c):
return fd.ops.{fname}(a, b, c) # type: ignore[attr-defined]
_nvfuser_impls["{fname}"] = _{fname}_nvfuser
"""
)
def _native_batch_norm_nvfuser(
fd, input, weight, bias, running_mean, running_var, training, momentum, eps
):
if weight is None:
weight = fd.define_null_tensor()
if bias is None:
bias = fd.define_null_tensor()
if running_mean is None:
running_mean = fd.define_null_tensor()
if running_var is None:
running_var = fd.define_null_tensor()
return fd.ops.batch_norm(
input,
weight,
bias,
running_mean,
running_var,
training,
momentum,
eps,
)
def _broadcast_in_dim_nvfuser(
fd: Any,
a: TensorLikeType,
shape: ShapeType,
broadcast_dimensions: ShapeType,
):
return fd.ops.broadcast_in_dim(a, shape, broadcast_dimensions) # type: ignore[attr-defined]
def _convert_element_type_nvfuser(fd: Any, a: TensorLikeType, dtype: torch.dtype):
nvfuser_dtype = getnvFuserDtype(dtype)
return fd.ops.cast(a, nvfuser_dtype) # type: ignore[attr-defined]
def _transpose_nvfuser(fd, a, permutation):
return fd.ops.permute(a, permutation) # type: ignore[attr-defined]
def _squeeze_nvfuser(fd, a, a_shape, dimensions):
for idx in reversed(sorted(dimensions)):
a = fd.ops.squeeze(a, a_shape, idx)
a_shape = a_shape[:idx] + a_shape[idx + 1 :]
return a
def _view_of_nvfuser(fd, a):
return fd.ops.set(a)
def _view_nvfuser(
fd,
a,
a_shape,
new_shape,
):
return fd.ops.view(a, a_shape, new_shape)
def _sum_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
):
keep_dims = False
output_dtype = torch._C._nvfuser.DataType.Null
return fd.ops.sum(a, dims, keep_dims, output_dtype)
def _var_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
*,
correction: int,
):
keep_dims = False
return fd.ops.var(a, dims, correction, keep_dims)
def _var_mean_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
unbiased: Optional[bool] = None,
keepdim: bool = False,
*,
correction: int,
):
# Unbiased arg shouldn't be set when this function is called
assert unbiased is None
# Ignore keepdim arg, because currently it's automatically converted into nvfuser's symbolic scalar
# keepdim is handled by the reference implementation
keepdim = False
return fd.ops.var_mean(a, dims, correction, keepdim)
def _rand_like_nvfuser(fd: Any, a: TensorLikeType):
return fd.ops.rand_like(a)
def _amax_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
):
keep_dims = False
return fd.ops.max(a, dims, keep_dims)
def _amin_nvfuser(
fd: Any,
a: TensorLikeType,
dims: DimsSequenceType,
):
keep_dims = False
return fd.ops.min(a, dims, keep_dims)
def _clone_nvfuser(fd: Any, input: TensorLikeType, *, memory_format=None):
return fd.ops.set(input)
_nvfuser_impls["native_batch_norm"] = _native_batch_norm_nvfuser
_nvfuser_impls["broadcast_in_dim"] = _broadcast_in_dim_nvfuser
_nvfuser_impls["convert_element_type"] = _convert_element_type_nvfuser
_nvfuser_impls["clone"] = _clone_nvfuser
_nvfuser_impls["transpose"] = _transpose_nvfuser
_nvfuser_impls["squeeze"] = _squeeze_nvfuser
_nvfuser_impls["view_of"] = _view_of_nvfuser
_nvfuser_impls["view"] = _view_nvfuser
_nvfuser_impls["rand_like"] = _rand_like_nvfuser
_nvfuser_impls["sum"] = _sum_nvfuser
_nvfuser_impls["var"] = _var_nvfuser
_nvfuser_impls["var_mean"] = _var_mean_nvfuser
_nvfuser_impls["amax"] = _amax_nvfuser
_nvfuser_impls["amin"] = _amin_nvfuser
def register_native_batch_norm():
"""This function is used to register the native_batch_norm function in torch.ops.nvprims module."""
name = "native_batch_norm"
nvprim.define(
f"{name}(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, "
+ "bool training, float momentum, float eps)"
+ " -> (Tensor, Tensor, Tensor)"
)
def _prim_impl(
input, weight, bias, running_mean, running_var, training, momentum, eps
):
return torch.native_batch_norm(
input, weight, bias, running_mean, running_var, training, momentum, eps
)
nvprim_impl.impl(name, _prim_impl)
nvprim_autograd_impl.impl(
name, backwards_not_supported(torch.ops.nvprims.native_batch_norm.default)
)
prim_packet = torch.ops.nvprims.native_batch_norm
prim = prim_packet.default
for p in (prim_packet, prim):
p.__doc__ = "Computes batch normalization."
p.impl_nvfuser = _nvfuser_impls["native_batch_norm"]
p.return_type = torch._prims_common.RETURN_TYPE.NEW # type: ignore[attr-defined]
def register_rand_like():
name = "rand_like"
nvprim.define(
"rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, "
+ "Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor"
)
def _meta_rand_like(
self,
*,
dtype=None,
layout=None,
device=None,
pin_memory=None,
memory_format=None,
):
strides = make_contiguous_strides_for(self.shape)
return torch._prims.TensorMeta(
self,
shape=self.shape,
strides=strides,
dtype=dtype,
device=device,
)
def _prim_impl(
self,
*,
dtype=None,
layout=None,
device=None,
pin_memory=None,
memory_format=None,
):
return torch.rand_like(
self,
dtype=dtype,
layout=layout,
device=device,
pin_memory=pin_memory,
memory_format=memory_format,
)
nvprim_impl.impl(name, _prim_impl)
nvprim_meta_impl.impl(name, _meta_rand_like)
prim_packet = getattr(torch.ops.nvprims, name)
prim = prim_packet.default
nvprim_autograd_impl.impl(name, backwards_not_supported(prim))
for p in (prim_packet, prim):
p.__doc__ = "Computes rand_like"
p.impl_nvfuser = _nvfuser_impls["rand_like"]
p.return_type = torch._prims_common.RETURN_TYPE.NEW # type: ignore[attr-defined]
def register_var_mean():
"""This function is used to register the var_mean function in torch.ops.nvprims module."""
name = "var_mean.main"
# This overload must be default for correct dispatching of var_mean(Tensor, bool)
nvprim.define("var_mean(Tensor inp, bool unbiased) -> (Tensor, Tensor)")
# This signature tries to combine several overloads of the torch.var_mean function into one overload.
nvprim.define(
f"{name}(Tensor inp, int[1]? dim=None, bool? unbiased=None, bool keepdim=False, *, int? correction=None)"
+ " -> (Tensor, Tensor)"
)
# This function is used for device="meta" Tensors.
def _meta_var_mean(inp, dim=None, unbiased=None, keepdim=False, *, correction=None):
if torch._prims_common.is_complex_dtype(inp.dtype):
output_dtype = torch._prims_common.corresponding_real_dtype(inp.dtype)
else:
output_dtype = inp.dtype
var = torch._prims._reduction_meta(inp, dim, output_dtype=output_dtype)
mean = torch._prims._reduction_meta(inp, dim, output_dtype=inp.dtype)
if keepdim:
output_shape = [
inp.shape[i] if i not in dim else 1 for i in range(inp.ndim)
]
broadcast_dims = [i for i in range(inp.ndim) if i not in dim]
var = torch.ops.nvprims.broadcast_in_dim(var, output_shape, broadcast_dims)
mean = torch.ops.nvprims.broadcast_in_dim(
mean, output_shape, broadcast_dims
)
return (var, mean)
# This function is used under _AutoDispatchBelowAutograd context
def _prim_impl(inp, dim=None, unbiased=None, keepdim=False, *, correction=None):
correction = torch._prims_common.set_correction(unbiased, correction)
return torch.var_mean(inp, dim, correction=correction, keepdim=keepdim)
nvprim_impl.impl(name, _prim_impl)
nvprim_meta_impl.impl(name, _meta_var_mean)
prim_packet = torch.ops.nvprims.var_mean
prim = prim_packet.main
def _unbiased_overload_impl(inp, unbiased):
return prim(inp, dim=None, unbiased=unbiased)
nvprim_implicit_impl.impl("var_mean", _unbiased_overload_impl)
@elementwise_type_promotion_wrapper(
type_promoting_args=("a",),
type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT,
)
def _var_mean_ref(a, dim=None, unbiased=None, keepdim=False, *, correction=None):
correction = torch._prims_common.set_correction(unbiased, correction)
# reduces over all dimensions if dim=() is passed
if dim == () or dim == []:
dim = None
dim = torch._prims_common.reduction_dims(a.shape, dim)
# For complex tensors eager computes the variance as the sum of variances of
# the real and imaginary parts
# TODO: Creating a complex tensor from real and imaginary parts is not supported
if torch._prims_common.is_complex_dtype(a.dtype):
raise NotImplementedError("Complex tensors are not supported")
var_mean = prim(a, dim, correction=correction)
if keepdim:
output_shape = [a.shape[i] if i not in dim else 1 for i in range(a.ndim)]
broadcast_dims = [i for i in range(a.ndim) if i not in dim]
var, mean = var_mean
var = torch.ops.nvprims.broadcast_in_dim(var, output_shape, broadcast_dims)
mean = torch.ops.nvprims.broadcast_in_dim(
mean, output_shape, broadcast_dims
)
var_mean = (var, mean)
return var_mean
def _var_mean_autograd(
a, dim=None, unbiased=None, keepdim=False, *, correction=None
):
# This wrapper is needed to convert prims calls inside
# elementwise_type_promotion_wrapper to nvprims calls
from torch._prims.context import NvfuserPrimsMode
with NvfuserPrimsMode():
return backwards_not_supported(_var_mean_ref)(
a, dim, unbiased, keepdim, correction=correction
)
nvprim_autograd_impl.impl(name, _var_mean_autograd)
for p in (prim_packet, prim):
p.__doc__ = "Computes the variance and mean of x over the list of dimensions specified in the dim argument"
p.impl_nvfuser = _nvfuser_impls["var_mean"]
p.return_type = torch._prims_common.RETURN_TYPE.NEW # type: ignore[attr-defined]
def _nvprims_view_impl_aten(a, original_shape, new_shape):
return a.reshape(new_shape)
def register_view():
"""This function is used to register the view function in torch.ops.view module."""
# View is implemented as a decomposition into prims.split_dim,
# prims.collapse_dim, and prims.reshape, but we would like to intercept
# non-decomposed view for now
name = "view"
nvprim.define("view(Tensor inp, SymInt[] original_shape, SymInt[] shape) -> Tensor")
nvprim.define("view.shape(Tensor inp, SymInt[] shape) -> Tensor")
# This function is used under _AutoDispatchBelowAutograd context
def _prim_impl(a, original_shape, new_shape):
return a.reshape(new_shape)
nvprim_impl.impl(name, _prim_impl)
prim_packet = torch.ops.nvprims.view
prim = prim_packet.default
def _view_no_original_shape_overload_impl(a, shape):
if list(a.shape) == list(shape):
return torch.ops.nvprims.view_of(a)
return torch.ops.nvprims.view.default(a, a.shape, shape)
nvprim_implicit_impl.impl("view.shape", _view_no_original_shape_overload_impl)
nvprim_autograd_impl.impl(name, backwards_not_supported(prim))
for p in (prim_packet, prim):
p.__doc__ = "Creates a tensor with the specified shape containing a copy of the data in a."
p.impl_nvfuser = _nvfuser_impls["view"]
p.return_type = torch._prims_common.RETURN_TYPE.VIEW # type: ignore[attr-defined]
p.impl_aten = _nvprims_view_impl_aten
def register_nvprims():
"""Registers all nvFuser primitives in the torch.ops.nvprims module."""
register_var_mean()
register_view()
register_native_batch_norm()
register_rand_like()
for name in nvprim_names:
main_prim = getattr(torch.ops.prims, name)
nvprim.define(main_prim.schema)
nvprim_impl.impl(name, main_prim.prim_impl)
nvprim_meta_impl.impl(name, main_prim.prim_meta_impl)
prim_packet = getattr(torch.ops.nvprims, name)
prim = prim_packet.default
nvprim_autograd_impl.impl(name, backwards_not_supported(prim))
for p in (prim_packet, prim):
p.__doc__ = main_prim.__doc__
p.impl_nvfuser = _nvfuser_impls[name]
p.return_type = main_prim.return_type # type: ignore[attr-defined]
p.impl_aten = main_prim.impl_aten
| [
"[email protected]"
] | |
76a28d2c4cfe2561d571b026c4614f17660b2703 | 816955010ba7bcd234688e502a29f522ece05771 | /facility_management/patches/v0_1/add_initial_violation_categories.py | 8e3e91fe009063ac257842d0672661b623f85987 | [
"MIT"
] | permissive | f-9t9it/facility_management | a87a6c5b2840b152e6f97476089bcbd0e2f75cb7 | b17d1c47b543427700acdddf91490b59c5357e50 | refs/heads/master | 2021-07-05T22:18:03.516323 | 2021-05-12T19:52:32 | 2021-05-12T19:52:32 | 241,338,459 | 4 | 4 | NOASSERTION | 2021-05-12T19:52:33 | 2020-02-18T10:57:12 | Python | UTF-8 | Python | false | false | 449 | py | import frappe
def execute():
for violation_category in ['Building & Structure', 'Exteriors', 'General Rules', 'Noise', 'Pets', 'Trash',
'Vehicles & Parking', 'Others']:
if not frappe.db.exists('Tenant Violation Category', violation_category):
frappe.get_doc({
'doctype': 'Tenant Violation Category',
'category_name': violation_category
}).insert()
| [
"[email protected]"
] | |
032ededfa1b1cba658ad9a14fb859fbe8d229a97 | a190c38740ff06e12e26b149c49a99595b11a09f | /File_Handling/04_file_delete.py | 8d96e042987be3e831df9f43702b7284d9167b04 | [
"MIT"
] | permissive | MNikov/Python-Advanced-September-2020 | 7d131f060afab1252ea3e9d709acf076086f5f54 | 1d65039de7f094d908411afffa8aee9689ab4220 | refs/heads/master | 2023-01-31T20:50:21.222223 | 2020-12-16T07:41:41 | 2020-12-16T07:41:41 | 295,539,475 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | import os
path = 'my_first_file.txt'
if os.path.exists(path):
os.remove(path)
else:
print('File already deleted!')
| [
"[email protected]"
] | |
976a93d9868ac5c4863f05814848d48b2d35828b | b40e5ea1bc1d83bfc94641a3469eeb866f4df24b | /hwk03/tests/test_roni.py | 14eaa40040fe140b4894a8d25ba57f2450ec2937 | [] | no_license | snowdj/LS-88-Demography | d250c9fea4979dca8f05d61a9f9e023784465fcb | c0b125474701fc00f2f285857a4caf08151684c8 | refs/heads/master | 2020-09-27T08:06:07.006160 | 2018-11-30T23:43:06 | 2018-11-30T23:43:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | test = {
'name': 'Question',
'points': 1,
'suites': [
{
'type': 'wwpp',
'cases': [
{
'code': r"""
>>> np.isclose(unpd_roni.where('area', 'Malawi').where('period', 2000).column('roni').item(0), 28.261)
True
""",
'hidden': False
},
{
'code': r"""
>>> np.isclose(unpd_roni.where('area', 'Germany').where('period', 1960).column('roni').item(0), 5.069)
True
""",
'hidden': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"[email protected]"
] | |
07229320ff7998ce9c1f50ed62799d9c7142b35a | bf47722c49c8e26fc3946ec293b080b9899240a8 | /LPR/whole_pro/__test.py | 1b09c64114b3ebe9cada6b0d51001fa3b9d07186 | [] | no_license | renxiaokai-heiyan/Smart_city_Genplate-LPD-LPR | e9aae25e7e542d3dccf02a88d9b417696d71dce8 | f93d8f1a66358be22c37ce94c089d2ab6284a602 | refs/heads/master | 2022-01-25T05:57:31.002166 | 2019-07-24T15:53:14 | 2019-07-24T15:53:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,403 | py |
"""
Author: youngorsu
Email : [email protected]
Last edited: 2018.1.29
"""
# coding=utf-8
# coding=utf-8 #
# 以utf-8编码储存中文字符
############################################################################
# 本文件主要是实现Qt界面,类“HyperLprWindow”实现主窗口,并调用其他类和函数
# 包含函数:def SimpleRecognizePlateWithGui(image):
# 包含类:class LicenseRecognizationThread(QThread):
# class HyperLprImageView(QGraphicsView):
# class HyperLprWindow(QMainWindow):
############################################################################
import sys
import os
from PyQt5.QtWidgets import (
QMainWindow,
QLabel,
QLineEdit,
QPushButton,
QHBoxLayout,
QVBoxLayout,
QGridLayout,
QTableWidget,
QWidget,
QAbstractItemView,
QHeaderView,
QGraphicsView,
QGraphicsScene,
QGraphicsPixmapItem,
QSplitter,
QFileDialog,
QTableWidgetItem,
QGraphicsRectItem,
QCheckBox,
QMessageBox,
QGroupBox,
QGraphicsSimpleTextItem,
qApp,
QAction,
QApplication)
from PyQt5.QtGui import QIcon, QColor, QPainter, QImage, QPixmap, QPen, QBrush, QFont, QPalette, QKeySequence
from PyQt5.QtCore import Qt, QDir, QSize, QEventLoop, QThread, pyqtSignal
import pipline as pp
import perspe as psp
import cv2
import numpy as np
import time
import math
import shutil
import pandas as pd
draw_plate_in_image_enable = 1
plateTypeName = ["蓝", "黄", "绿", "白", "黑 "]
Son_k = ["典型竖直透视角变化子库", "典型水平透视角变化子库", "分辨率变化子库", "亮度不均匀变化子库", "平均亮度变化子库",
"散焦模糊变化子库", "竖直错切角变化子库", "水平旋转角变化子库", "运动模糊变化子库"]
def SimpleRecognizePlateWithGui(image, path_save = "", name = ""):
t0 = time.time()
images = pp.detect.detectPlateRough(
image, image.shape[0], top_bottom_padding_rate=0.1)
res_set = []
y_offset = 32
for j, plate in enumerate(images):
plate, rect, origin_plate = plate
plate = cv2.resize(plate, (136, 36))
cv2.imencode('.jpg', plate)[1].tofile("G:/RePicture/plate/" + str(j) + ".jpg")
cv2.imencode('.jpg', origin_plate)[1].tofile("G:/RePicture/originplate/" + str(j) + ".jpg")
t1 = time.time()
# plate_type = pp.td.SimplePredict(plate)
# plate_color = plateTypeName[plate_type]
#
# if (plate_type > 0) and (plate_type < 5):
# plate = cv2.bitwise_not(plate)
# cv2.imencode('.jpg', plate)[1].tofile("G:/RePicture/bitwise_not/" + str(j) + ".jpg")
# if draw_plate_in_image_enable == 1:
# image[y_offset:y_offset + plate.shape[0], 0:plate.shape[1]] = plate
# y_offset = y_offset + plate.shape[0] + 4
image_rgb = pp.fm.findContoursAndDrawBoundingBox(plate)
cv2.imencode('.jpg', image_rgb)[1].tofile("G:/RePicture/精定位后/" + str(j) + ".jpg")
# if draw_plate_in_image_enable == 1:
# image[y_offset:y_offset + image_rgb.shape[0],
# 0:image_rgb.shape[1]] = image_rgb
# y_offset = y_offset + image_rgb.shape[0] + 4
image_rgb = pp.fv.finemappingVertical(image_rgb)
# if draw_plate_in_image_enable == 1:
# image[y_offset:y_offset + image_rgb.shape[0],
# 0:image_rgb.shape[1]] = image_rgb
# y_offset = y_offset + image_rgb.shape[0] + 4
################### 输出整张车牌 ####################
if len(path_save) > 0:
cv2.imencode('.jpg', image_rgb)[1].tofile(path_save + "/完整车牌/" + name)
# if draw_plate_in_image_enable == 1:
# image[y_offset:y_offset + image_rgb.shape[0],
# 0:image_rgb.shape[1]] = image_rgb
# y_offset = y_offset + image_rgb.shape[0] + 4
# e2e_plate, e2e_confidence = pp.e2e.recognizeOne(image_rgb)
# print("e2e:", e2e_plate, e2e_confidence)
plate_type = pp.td.SimplePredict(plate)
plate_color = plateTypeName[plate_type]
print("颜色:", plate_color)
if (plate_type > 0) and (plate_type < 5):
image_rgb = cv2.bitwise_not(image_rgb)
image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
# print("校正", time.time() - t1, "s")
t2 = time.time()
val = pp.segmentation.slidingWindowsEval(image_gray)
################### 输出分割后的车牌 ####################
if len(path_save) > 0 and len(val) > 0:
for i in range(7):
# cv2.imencode('.jpg', val[0][i])[1].tofile(path_save + "/分割车牌/" + str(i) + "-" + name)
cv2.imencode('.jpg', val[0][i])[1].tofile("G:/RePicture/分割车牌/" + str(i) + "-" + name)
# print val
# print("分割和识别", time.time() - t2, "s")
# res = ""
# confidence = 0
# if len(val) == 3:
# blocks, res, confidence = val
# if confidence / 7 > 0.7:
#
# if draw_plate_in_image_enable == 1:
# image = pp.drawRectBox(image, rect, res)
# for i, block in enumerate(blocks):
# block_ = cv2.resize(block, (24, 24))
# block_ = cv2.cvtColor(block_, cv2.COLOR_GRAY2BGR)
# image[j * 24:(j * 24) + 24, i *
# 24:(i * 24) + 24] = block_
# if image[j * 24:(j * 24) + 24,
# i * 24:(i * 24) + 24].shape == block_.shape:
# pass
#
# res_set.append([res,
# confidence / 7,
# rect,
# plate_color,
# e2e_plate,
# e2e_confidence,
# len(blocks)])
# print("seg:", res, confidence/7)
# print(time.time() - t0, "s")
print("---------------------------------")
return image, res_set
# ***************************************************************************************
# 生成卷积核和锚点
def genaratePsf(length, angle):
EPS = np.finfo(float).eps
alpha = (angle - math.floor(angle / 180) * 180) / 180 * math.pi
cosalpha = math.cos(alpha)
sinalpha = math.sin(alpha)
if cosalpha < 0:
xsign = -1
elif angle == 90:
xsign = 0
else:
xsign = 1
psfwdt = 1
# 模糊核大小
sx = int(math.fabs(length * cosalpha + psfwdt * xsign - length * EPS))
sy = int(math.fabs(length * sinalpha + psfwdt - length * EPS))
psf1 = np.zeros((sy, sx))
# psf1是左上角的权值较大,越往右下角权值越小的核。
# 这时运动像是从右下角到左上角移动
for i in range(0, sy):
for j in range(0, sx):
psf1[i][j] = i * math.fabs(cosalpha) - j * sinalpha
rad = math.sqrt(i * i + j * j)
if rad >= (length/2) and math.fabs(psf1[i][j]) <= psfwdt:
temp = (length/2) - math.fabs((j + psf1[i][j] * sinalpha) / cosalpha)
psf1[i][j] = math.sqrt(psf1[i][j] * psf1[i][j] + temp * temp)
psf1[i][j] = psfwdt + EPS - math.fabs(psf1[i][j])
if psf1[i][j] < 0:
psf1[i][j] = 0
# 运动方向是往左上运动,锚点在(0,0)
anchor = (0, 0)
# 运动方向是往右上角移动,锚点一个在右上角
# 同时,左右翻转核函数,使得越靠近锚点,权值越大
if angle < 90 and angle > 0:
psf1 = np.fliplr(psf1)
anchor = (psf1.shape[1] - 1, 0)
elif angle > -90 and angle < 0: # 同理:往右下角移动
psf1 = np.flipud(psf1)
psf1 = np.fliplr(psf1)
anchor = (psf1.shape[1] - 1, psf1.shape[0] - 1)
elif anchor < -90: # 同理:往左下角移动
psf1 = np.flipud(psf1)
anchor = (0, psf1.shape[0] - 1)
psf1 = psf1 / psf1.sum()
return psf1, anchor
# 使用范例
# kernel, anchor = genaratePsf(20, 40)
# motion_blur = cv2.filter2D(image, -1, kernel, anchor=anchor)
# ***************************************************************************************
# 产生运动模糊图像
def motion_blur(image, degree=10, angle=20):
image = np.array(image)
# 这里生成任意角度的运动模糊kernel的矩阵, degree越大,模糊程度越高
M = cv2.getRotationMatrix2D((degree / 2, degree / 2), angle, 1)
motion_blur_kernel = np.diag(np.ones(degree))
motion_blur_kernel = cv2.warpAffine(motion_blur_kernel, M, (degree, degree))
motion_blur_kernel = motion_blur_kernel / degree
blurred = cv2.filter2D(image, -1, motion_blur_kernel)
# convert to uint8
cv2.normalize(blurred, blurred, 0, 255, cv2.NORM_MINMAX)
blurred = np.array(blurred, dtype=np.uint8)
return blurred
# 使用范例
# motion_out = motion_blur(image, 20, 40)
# 产生散焦模糊图像
# cv2.GaussianBlur(image, ksize=(degree, degree), sigmaX=0, sigmaY=0)
# ***************************************************************************************
import matplotlib.pyplot as graph
import numpy as np
from numpy import fft
import math
import cv2
from skimage.measure import compare_ssim
# 仿真运动模糊
def motion_process(image_size, motion_angle):
PSF = np.zeros(image_size)
print(image_size)
center_position = (image_size[0] - 1) / 2
print(center_position)
slope_tan = math.tan(motion_angle * math.pi / 180)
slope_cot = 1 / slope_tan
if abs(slope_tan) <= 1:
for i in range(15):
offset = round(i * slope_tan) # ((center_position-i)*slope_tan)
PSF[int(center_position + offset), int(center_position - offset)] = 1
return PSF / PSF.sum() # 对点扩散函数进行归一化亮度
else:
for i in range(15):
offset = round(i * slope_cot)
PSF[int(center_position - offset), int(center_position + offset)] = 1
return PSF / PSF.sum()
# 对图片进行运动模糊
def make_blurred(input, PSF, eps):
input_fft = fft.fft2(input) # 进行二维数组的傅里叶变换
PSF_fft = fft.fft2(PSF) + eps
blurred = fft.ifft2(input_fft * PSF_fft)
blurred = np.abs(fft.fftshift(blurred))
return blurred
def inverse(input, PSF, eps): # 逆滤波
input_fft = fft.fft2(input)
PSF_fft = fft.fft2(PSF) + eps # 噪声功率,这是已知的,考虑epsilon
result = fft.ifft2(input_fft / PSF_fft) # 计算F(u,v)的傅里叶反变换
result = np.abs(fft.fftshift(result))
return result
def wiener(input, PSF, eps, K=0.01): # 维纳滤波,K=0.01
input_fft = fft.fft2(input)
PSF_fft = fft.fft2(PSF) + eps
PSF_fft_1 = np.conj(PSF_fft) / (np.abs(PSF_fft) ** 2 + K)
result = fft.ifft2(input_fft * PSF_fft_1)
result = np.abs(fft.fftshift(result))
return result
def rad(x):
return x * np.pi / 180
import predictionM as predm
if __name__ == '__main__':
# path = "./img_test/功能评测图像库/车牌种类变化子库/教练车牌/" # 粤C1557学
# path = "./img_test/功能评测图像库/车牌种类变化子库/澳门出入境车牌/" # 粤Z3810澳
# path = "./img_test/功能评测图像库/车牌种类变化子库/新能源-小车牌/" # 粤CD08828
# path = "./img_test/功能评测图像库/车牌种类变化子库/新能源-大车牌/" # 粤C00209D
# path = "./Dataset/车牌种类变化子库/大型汽车前牌/" # 川B23523
# path = "./Dataset/车牌种类变化子库/大型汽车后牌/" # 粤W07717 不出错,但没有操作:粤W07655
# path = "./img_test/功能评测图像库/车牌种类变化子库/军用车牌/" # GB34114
# path = "./img_test/性能评测图像库/典型竖直透视角变化子库/竖直透视角60/" # 60_0018
# path = "./img_test/性能评测图像库/运动模糊变化子库/motion1/" # 001_0001
# path = "./img_test/功能评测图像库/省市简称变化子库/“新”牌/" # 60_0018
# name = "a.jpg"
# path_to = "G:/RePicture"
# image = cv2.imdecode(np.fromfile(path + name, dtype=np.uint8), -1)
# image, res_set = SimpleRecognizePlateWithGui(image, path_to, name)
# image = cv2.imdecode(np.fromfile("G:/RePicture/a.jpg", dtype=np.uint8), -1)
# image_blurred = cv2.imdecode(np.fromfile("G:/RePicture/014_0001.jpg", dtype=np.uint8), -1)
#
# image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# image_blurred = cv2.cvtColor(image_blurred, cv2.COLOR_BGR2GRAY)
#
# img_h = image.shape[0]
# img_w = image.shape[1]
# # graph.figure(1)
# # graph.xlabel("Original Image")
# # graph.gray()
# # graph.imshow(image) # 显示原图像
#
# graph.figure(2)
# graph.gray()
# # 进行运动模糊处理
# PSF = motion_process((img_h, img_w), 60)
# blurred = np.abs(make_blurred(image, PSF, 1e-3))
#
# score, diff = compare_ssim(image_blurred, blurred.astype("uint8"), full=True)
#
# graph.subplot(231)
# graph.xlabel("Motion blurred")
# graph.imshow(blurred)
#
#
# result = inverse(image_blurred, PSF_max, 1e-3) # 逆滤波
# graph.subplot(232)
# graph.xlabel("inverse deblurred")
# graph.imshow(result)
#
# result = wiener(image_blurred, PSF_max, 1e-3) # 维纳滤波
# graph.subplot(233)
# graph.xlabel("wiener deblurred(k=0.01)")
# graph.imshow(result)
#
#
# blurred_noisy = blurred + 0.1 * blurred.std() * \
# np.random.standard_normal(blurred.shape) # 添加噪声,standard_normal产生随机的函数
#
# graph.subplot(234)
# graph.xlabel("motion & noisy blurred")
# graph.imshow(blurred_noisy) # 显示添加噪声且运动模糊的图像
#
# result = inverse(blurred_noisy, PSF, 0.1 + 1e-3) # 对添加噪声的图像进行逆滤波
# graph.subplot(235)
# graph.xlabel("inverse deblurred")
# graph.imshow(result)
#
# result = wiener(blurred_noisy, PSF, 0.1 + 1e-3) # 对添加噪声的图像进行维纳滤波
# graph.subplot(236)
# graph.xlabel("wiener deblurred(k=0.01)")
# graph.imshow(result)
#
# graph.show()
a = predm.Pre7()
a1, a2= a.Run("./OutDataset/完整车牌/典型水平透视角变化子库/水平透视角2/")
print("a1:", a1)
print("a2:", a2)
a1, a2= a.Run("./OutDataset/完整车牌/典型水平透视角变化子库/水平透视角3/")
print("a1:", a1)
print("a2:", a2)
| [
"[email protected]"
] | |
18e1a95c1abf23d6839f1e3b9d1c9bdb145e2c6b | 3a2c449c5e870a96958772c8ba89d9e119f4b098 | /210-219/214.py | 57e1ebcaba9eded40fa81fa6a3cfafff5cedca8b | [] | no_license | pedrotari7/euler | 0a8ea69c616b84bddfaeaf5d643592a73f321456 | 692ca67c895e3d0c1655bd53501f3d22c1ebb56c | refs/heads/master | 2020-05-21T04:43:34.816108 | 2017-12-11T19:25:31 | 2017-12-11T19:25:31 | 52,283,414 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,788 | py | from collections import Counter
from itertools import repeat, takewhile
from fractions import Fraction
def multiply(numbers):
result = 1
for n in numbers:
result *= n
return result
class Primes:
def __init__(self, n):
N = n // 2
sieve = [True] * N
for i in range(3, int(n**0.5) + 1, 2):
if sieve[i // 2]:
start = i ** 2 // 2
sieve[start::i] = repeat(False, len(range(start, N, i)))
self._list = [2] + [2*i+1 for i in range(1, N) if sieve[i]]
self._set = set(self._list)
self.maxn = n
def upto(self, n):
if self.maxn < n:
self.__init__(max(n, 2 * self.maxn))
return takewhile(lambda x: x <= n, self._list)
class Factors:
def __init__(self, maxn):
self.largest = [1] * maxn
for p in primes.upto(maxn):
self.largest[p::p] = repeat(p, len(range(p, maxn, p)))
def totient(self, n):
return int(n * multiply(1 - Fraction(1, p) for p in set(self(n))))
def __call__(self, n):
result = []
if n >= len(self.largest):
for p in primes:
while n % p == 0:
result.append(p)
n = n // p
if n < len(self.largest):
break
while n > 1:
p = self.largest[n]
result.append(p)
n = n // p
return result
toti = dict()
def chain_size(n):
if n == 1: return 1
if n not in toti: toti[n] = 1 + chain_size(factors.totient(n))
return toti[n]
N = 40*10**6
s = 25
primes = Primes(N)
factors = Factors(N)
total = 0
for i in primes.upto(N):
if chain_size(i) == s:
total+=i
print total
| [
"[email protected]"
] | |
76aad4906a42f7841a9200152a71a8a89d9f4198 | 851f7fde684774ca0388a28cb7035aa1e95f5de0 | /Ercesscorp/migrations/0003_auto_20190511_1215.py | 4f63b800ada217b5c6fb3fd7a0f3078b9c00f073 | [] | no_license | aditya2222/django-tickets | 01451a724cf97c8f2f338ba85a704e85ae57b008 | 3c2ecd252479fc5821873823cdbbb4641268a2d2 | refs/heads/master | 2022-12-16T17:17:07.821446 | 2019-05-12T02:58:05 | 2019-05-12T02:58:05 | 186,204,071 | 0 | 0 | null | 2022-11-22T03:22:25 | 2019-05-12T02:55:47 | JavaScript | UTF-8 | Python | false | false | 328 | py | # Generated by Django 2.1.7 on 2019-05-11 12:15
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Ercesscorp', '0002_auto_20190511_0950'),
]
operations = [
migrations.AlterModelTable(
name='users',
table='users',
),
]
| [
"[email protected]"
] | |
eee6625e9a6a9f549bd2be92f4c3b33bd8a94475 | 5aaa310a93d5154a80389e1361bcb0e4a3d4f903 | /day07/01_奇异值分解.py | 1c0c63677e68843f96dea8ae5550bc32c6ff93f5 | [] | no_license | 1751660300/ai | 9ef4afb893e59d99c150e0d2abac378c48568bb8 | f5eb845d2e4a9eb73fffb089133191e5edecce77 | refs/heads/master | 2022-12-11T21:53:05.702992 | 2020-09-09T14:29:57 | 2020-09-09T14:29:57 | 294,137,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | # -*- coding:utf-8 -*-
"""
1.什么是奇异值分解?
一个矩阵M,可以分解为3个矩阵U,S,V,使得U * S * V等于M。
U,V都是 正交矩阵(正交矩阵乘于该矩阵的转置等于单位矩阵)
那么S矩阵主对角线上的元素称为矩阵M的奇异值,其他元素都为0.
2.奇异值的应用
可以根据奇异值,逆推原矩阵,跟特征值的作用相似,不同的是奇异值可以提取非方阵,特征值的提取
必须是方阵
3.获取奇异值
使用numpy.linalg.svd(M, full_matrices=False)方法,返回三个矩阵U,S,V,但是S是一个一维数组(就是奇异值)
full_matrices=False:是否返回完全的方阵
"""
import numpy as np
M = np.mat("4 11 14; 8 7 -2")
print(M)
# 奇异值分解
U, S, V = np.linalg.svd(M, full_matrices=False)
# 如果不是设置 full_matrices=False 则函数返回的U,V就是方阵
# U, S, V = np.linalg.svd(M)
print(U * U.T)
print(V * V.T)
print(S)
S = np.diag(S)
print(S)
print(U * S * V)
| [
"[email protected]"
] | |
a0ca94ab4a085f99eab19d2b5ba6351ef37814b6 | 303d4aa8ec749844d2f7472e1006b46379c62bc6 | /test/turtle模块绘图.py | a390c53b3c474cdcab4ae23fb629576af852457d | [] | no_license | suntyneu/test | d6f63791006785518d8e4c92d89018049396dd01 | a05f93fe71cdd0594fba180e0bed52d402b47bb2 | refs/heads/master | 2023-03-26T10:55:28.214978 | 2021-03-20T07:04:03 | 2021-03-20T10:02:38 | 349,372,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py | """
绘图工具,提供一个小海龟,理解为一个机器人呢,只能听得懂有限的命令。
绘图窗口原地在(0, 0)默认朝向右侧
导入 turtle库
运动命令:
forward(d) 向前移动d长度
backward(d) 向后移动d长度
笔画控制命令:
其他命令:
done() 程序继续执行
"""
import turtle
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.done()
| [
"[email protected]"
] | |
986960919be0320376e0dde6dcf68eafef65c5ed | 587dbdf730b6cc3e693efc5dca5d83d1dd35ee1a | /leetcode/msjd/01-02.py | d4550e72553de9dc183448adc39bf81ca9d60ffd | [] | no_license | Rivarrl/leetcode_python | 8db2a15646d68e4d84ab263d8c3b6e38d8e3ea99 | dbe8eb449e5b112a71bc1cd4eabfd138304de4a3 | refs/heads/master | 2021-06-17T15:21:28.321280 | 2021-03-11T07:28:19 | 2021-03-11T07:28:19 | 179,452,345 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | # -*- coding: utf-8 -*-
# ======================================
# @File : 01-02.py
# @Time : 2020/5/9 23:06
# @Author : Rivarrl
# ======================================
# [面试题 01.02. 判定是否互为字符重排](https://leetcode-cn.com/problems/check-permutation-lcci/)
from algorithm_utils import *
class Solution:
@timeit
def CheckPermutation(self, s1: str, s2: str) -> bool:
from collections import Counter
d1, d2 = Counter(s1), Counter(s2)
for c in d2:
if not c in d1 or d1[c] != d2[c]:
return False
return True
if __name__ == '__main__':
a = Solution()
a.CheckPermutation("abc", "bca")
a.CheckPermutation("abc", "bad") | [
"[email protected]"
] | |
b329ab9cc69df8e15b2920117a312e186e8c8a28 | aef1ea6df9f7fa7a812d9330873dca97ef205e53 | /sales/migrations/0004_auto_20200218_1844.py | a86f6ea059f5e922dc4fb1f7b23d3d09a6ee6e63 | [] | no_license | iamshakibulislam/bakery-management | 0033fec1178d24e427ef68d025682501c5ba6320 | 2751b2cc1f76eeb5825bc3133234ba97e1415569 | refs/heads/master | 2023-02-17T17:05:51.078466 | 2021-01-18T08:36:18 | 2021-01-18T08:36:18 | 254,834,024 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,500 | py | # Generated by Django 3.0.3 on 2020-02-18 12:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sales', '0003_auto_20200216_2114'),
]
operations = [
migrations.CreateModel(
name='pay_retail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default='2020-02-18')),
('amount', models.IntegerField()),
('name', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sales.saleman_list')),
],
),
migrations.AlterField(
model_name='deposit_from_saleman',
name='date',
field=models.DateField(default='2020-02-18'),
),
migrations.AlterField(
model_name='retail',
name='date',
field=models.DateField(default='2020-02-18'),
),
migrations.AlterField(
model_name='retail_sales',
name='date',
field=models.DateField(default='2020-02-18'),
),
migrations.AlterField(
model_name='saleman_sale',
name='date',
field=models.DateField(default='2020-02-18'),
),
migrations.DeleteModel(
name='pay_saleman',
),
]
| [
"[email protected]"
] | |
6cd6f32fe0bd293bf31f942b6055041e058d88de | 017032b3ea86f938925c33858802b2fb5cb0832f | /instagram/config/urls/apis.py | 7c602cc3ec38f9dd3d92db96002088c384071cba | [] | no_license | Fastcampus-WPS-6th/Instagram | 664d9c9fb2ac1a76381bbeece47f791759240a2c | 2b859b3b27dab25e59097b17b9f3940fadb5deeb | refs/heads/master | 2022-12-16T00:55:45.638303 | 2017-11-14T07:53:04 | 2017-11-14T07:53:04 | 106,781,002 | 2 | 2 | null | 2022-12-08T00:37:33 | 2017-10-13T05:19:17 | Python | UTF-8 | Python | false | false | 197 | py | from django.conf.urls import url, include
urlpatterns = [
url(r'^member/', include('member.urls.apis', namespace='member')),
url(r'^post/', include('post.urls.apis', namespace='post')),
]
| [
"[email protected]"
] | |
e558be579cdc61dac9294eb4943e5a9f6997937c | e34a44c07adb818a15dd0742761a4c2cf4258336 | /src/homework/homework11/main.py | 1b7c6fe9b9694138f40a7da83440d5654e1ee2e3 | [
"MIT"
] | permissive | acc-cosc-1336/cosc-1336-spring-2018-jjmareck | 629e9cdb3a0f091e440e6dccbd2bc23341df4a2c | 7abfd79cb9a63192c965f828a185ccd981820bae | refs/heads/master | 2021-05-16T14:08:06.763135 | 2018-05-12T03:33:17 | 2018-05-12T03:33:17 | 118,071,035 | 0 | 0 | MIT | 2018-02-25T23:29:58 | 2018-01-19T03:22:07 | Python | UTF-8 | Python | false | false | 1,061 | py | from src.homework.homework11.player import Player
from src.homework.homework11.game_log import GameLog
from src.homework.homework11.die6 import Die6
from src.homework.homework11.die8 import Die8
#write import statements for Die6 and Die8 classes
game_log = GameLog()
#ASSIGNMENT 12: Write statements to create Die6 and Die8 instances
die6 = Die6()
die8 = Die8()
#ASSIGNMENT12: pass the Die6 and Die8 instance object variables to the Player instantiation below as parameters after
#the game_log parameter
Player(game_log,die6,die8).roll_doubles()
game_log.display_log()
#ASSIGNMENT12: Create another GameLog instance
#ASSIGNMENT12: Write statements to create Die6 and Die8 instances
#ASSIGNMENT12: Create a new instance of the Player class and pass it the game log, die6, and die8 instances.
#ASSIGNMENT12: Call the player instance roll_doubles.
#ASSIGNMENT12: Call the game log instance display_log method.
game_log_2 = GameLog()
die6_2 = Die6()
die8_2 = Die8()
player2 = Player(game_log_2,die6_2,die8_2)
player2.roll_doubles()
game_log_2.display_log()
| [
"[email protected]"
] | |
f700efc6c3f7cee15c14e7308729bd1b4b9dd32f | c237dfae82e07e606ba9385b336af8173d01b251 | /ZServer/medusa/unix_user_handler.py | 7d737c80d552bdf9b0637945802616f82efc5063 | [
"ZPL-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | OS2World/APP-SERVER-Zope | 242e0eec294bfb1ac4e6fa715ed423dd2b3ea6ff | dedc799bd7eda913ffc45da43507abe2fa5113be | refs/heads/master | 2020-05-09T18:29:47.818789 | 2014-11-07T01:48:29 | 2014-11-07T01:48:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | # -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <[email protected]>
# Copyright 1996, 1997 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: unix_user_handler.py,v 1.3 2001/05/01 11:44:49 andreas Exp $'
# support for `~user/public_html'.
import re
import string
import default_handler
import filesys
import os
import pwd
get_header = default_handler.get_header
user_dir = re.compile ('/~([^/]+)(.*)')
class unix_user_handler (default_handler.default_handler):
def __init__ (self, public_html = 'public_html'):
self.public_html = public_html
default_handler.default_handler.__init__ (self, None)
# cache userdir-filesystem objects
fs_cache = {}
def match (self, request):
m = user_dir.match (request.uri)
return m and (m.end() == len (request.uri))
def handle_request (self, request):
# get the user name
user = user_dir.group(1)
rest = user_dir.group(2)
# special hack to catch those lazy URL typers
if not rest:
request['Location'] = 'http://%s/~%s/' % (
request.channel.server.server_name,
user
)
request.error (301)
return
# have we already built a userdir fs for this user?
if self.fs_cache.has_key (user):
fs = self.fs_cache[user]
else:
# no, well then, let's build one.
# first, find out where the user directory is
try:
info = pwd.getpwnam (user)
except KeyError:
request.error (404)
return
ud = info[5] + '/' + self.public_html
if os.path.isdir (ud):
fs = filesys.os_filesystem (ud)
self.fs_cache[user] = fs
else:
request.error (404)
return
# fake out default_handler
self.filesystem = fs
# massage the request URI
request.uri = '/' + rest
return default_handler.default_handler.handle_request (self, request)
def __repr__ (self):
return '<Unix User Directory Handler at %08x [~user/%s, %d filesystems loaded]>' % (
id(self),
self.public_html,
len(self.fs_cache)
)
| [
"[email protected]"
] | |
c57e5a71b7397ea1d38649655ed8f773245c84e5 | 6392354e74cce4a303a544c53e13d0a7b87978ee | /m6/MyBlog/MyBlog/settings.py | 0136cc69f02b7cb015f3c2b498568636608dfbcd | [] | no_license | music51555/wxPythonCode | dc35e42e55d11850d7714a413da3dde51ccdd37e | f77b71ed67d926fbafd1cfec89de8987d9832016 | refs/heads/master | 2020-04-11T20:20:38.136446 | 2019-04-01T09:17:34 | 2019-04-01T09:17:34 | 162,067,449 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,973 | py | """
Django settings for MyBlog project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '84_z7kklzmg9y2wl^azw-=a&zs)zn&6akunu$w+jps3czbff2-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
LOGIN_URL='/login/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'MyBlog.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'propagate': True,
'level':'DEBUG',
},
}
}
WSGI_APPLICATION = 'MyBlog.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'blog',
'HOST': '140.143.132.118',
'PORT': 3306,
'USER': 'xiaoxin',
'PASSWORD': 'Nishi458_2'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL='blog.UserInfo'
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR,'static')
]
MEDIA_ROOT=os.path.join(BASE_DIR,'media')
# MEDIA_URL = '/media/'
EMAIL_HOST = 'smtp.qq.com'
EMAIL_POST = 465
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'dqccbhjmkjxpbgig'
# DEFAULT_FROM_EMAIL = ''
EMAIL_USE_SSL = False
| [
"[email protected]"
] | |
a6e7c6bb706e3fdd574e0eef4746a4df0fdafa16 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/user_interest_service/client.py | 6bc0da041072cbf210bd1bc4607c85affbd6adad | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,847 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v6.common.types import criterion_category_availability
from google.ads.googleads.v6.enums.types import user_interest_taxonomy_type
from google.ads.googleads.v6.resources.types import user_interest
from google.ads.googleads.v6.services.types import user_interest_service
from .transports.base import UserInterestServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import UserInterestServiceGrpcTransport
class UserInterestServiceClientMeta(type):
"""Metaclass for the UserInterestService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[UserInterestServiceTransport]]
_transport_registry['grpc'] = UserInterestServiceGrpcTransport
def get_transport_class(cls,
label: str = None,
) -> Type[UserInterestServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class UserInterestServiceClient(metaclass=UserInterestServiceClientMeta):
"""Service to fetch Google Ads User Interest."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = 'googleads.googleapis.com'
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserInterestServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
UserInterestServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename)
kwargs['credentials'] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> UserInterestServiceTransport:
"""Return the transport used by the client instance.
Returns:
UserInterestServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def user_interest_path(customer_id: str,user_interest_id: str,) -> str:
"""Return a fully-qualified user_interest string."""
return "customers/{customer_id}/userInterests/{user_interest_id}".format(customer_id=customer_id, user_interest_id=user_interest_id, )
@staticmethod
def parse_user_interest_path(path: str) -> Dict[str,str]:
"""Parse a user_interest path into its component segments."""
m = re.match(r"^customers/(?P<customer_id>.+?)/userInterests/(?P<user_interest_id>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str,str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str, ) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder, )
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str,str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str, ) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization, )
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str,str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str, ) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project, )
@staticmethod
def parse_common_project_path(path: str) -> Dict[str,str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str, ) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str,str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(self, *,
credentials: Optional[credentials.Credentials] = None,
transport: Union[str, UserInterestServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the user interest service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.UserInterestServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")))
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, UserInterestServiceTransport):
# transport is a UserInterestServiceTransport instance.
if credentials:
raise ValueError('When providing a transport instance, '
'provide its credentials directly.')
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = UserInterestServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_user_interest(self,
request: user_interest_service.GetUserInterestRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> user_interest.UserInterest:
r"""Returns the requested user interest in full detail
Args:
request (:class:`google.ads.googleads.v6.services.types.GetUserInterestRequest`):
The request object. Request message for
[UserInterestService.GetUserInterest][google.ads.googleads.v6.services.UserInterestService.GetUserInterest].
resource_name (:class:`str`):
Required. Resource name of the
UserInterest to fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v6.resources.types.UserInterest:
A user interest: a particular
interest-based vertical to be targeted.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError('If the `request` argument is set, then none of '
'the individual field arguments should be set.')
# Minor optimization to avoid making a copy if the user passes
# in a user_interest_service.GetUserInterestRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, user_interest_service.GetUserInterestRequest):
request = user_interest_service.GetUserInterestRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_user_interest]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
('resource_name', request.resource_name),
)),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
__all__ = (
'UserInterestServiceClient',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
45f5e444d8a77f49dc82fa426bd042cefa7c4e05 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /important_hand/part_or_year.py | a20ee7526e10994fd0dd1c9a222a98873a7db3b1 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py |
#! /usr/bin/env python
def good_government_or_point(str_arg):
first_time(str_arg)
print('make_week')
def first_time(str_arg):
print(str_arg)
if __name__ == '__main__':
good_government_or_point('time')
| [
"[email protected]"
] | |
d9fbfe1bb534eb343c1c47c1aebd8bfb00e7d944 | 75a35cefa5adf2f42503eb0cc8c60f7f96ff9650 | /produccion/migrations/0010_prodleche_vo.py | e1e409b5bba6811d832d082f8f1c4ab365bc8f15 | [] | no_license | PatacaSis/agroweb | 5c70f35001d0e88fb5f1642161d4eee6b4abda59 | e2181fa0bb6ca7752bdbaab62fe60ede9f2630b2 | refs/heads/main | 2023-06-20T23:37:50.294745 | 2021-07-19T23:26:55 | 2021-07-19T23:26:55 | 381,737,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # Generated by Django 2.2 on 2021-06-24 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('produccion', '0009_auto_20210618_0943'),
]
operations = [
migrations.AddField(
model_name='prodleche',
name='vo',
field=models.IntegerField(default=1),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
f43a80bb21f4e1d7ace060143a3ff8fd95b5a258 | cb95db2638e100f52f8810747fd3ee7be3660b1f | /static/audio_file/clear_cache.py | 0988c8635225a139a6dfd03fe36462f48b02e6fe | [
"MIT"
] | permissive | wanZzz6/smart_robot | c16c7e20c421ff7431a00b95a9f7c5ec56bbcb92 | 769dc3a3dbf35d43abc416c08ba8be81bff72747 | refs/heads/db_version | 2023-05-28T12:40:55.928491 | 2020-04-20T03:52:48 | 2020-04-20T03:52:48 | 140,998,544 | 5 | 1 | null | 2023-05-22T21:37:20 | 2018-07-15T04:38:10 | CSS | UTF-8 | Python | false | false | 158 | py | import os
all_file = os.listdir('.')
print('正在清理缓存')
for i in all_file:
if 'auido' in i or 'clear' in i:
continue
os.remove(i)
| [
"[email protected]"
] | |
ae8f27c547a5e25a413c4ee71d01778d834057a3 | 17d23f404a20c34a406dd086b0a89f956c4ecac0 | /Django-Tutorials/listening/migrations/0001_initial.py | 66398fa340306646d1693b2b624511c665b7fc9f | [] | no_license | apabhishek178/ieltsonline | 69df682862d96bc04b318262e962e22a0919fe88 | 42061efa8293c948342a670f0a62c90d3b31ebff | refs/heads/master | 2020-04-26T09:19:55.712217 | 2019-03-20T13:36:31 | 2019-03-20T13:36:31 | 173,451,873 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,002 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2019-02-18 16:03
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AudioMain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='Listening Part', max_length=50)),
('Instruction', models.TextField(default='instructions', max_length=2000)),
('audio_file', models.FileField(help_text='Allowed type - .mp3, .wav, .ogg', null=True, upload_to='audiofolder/')),
],
),
migrations.CreateModel(
name='Fillup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='Fillup Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('Question', models.TextField(default='Your questions', max_length=100)),
('audio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audioFill', to='listening.AudioMain')),
],
),
migrations.CreateModel(
name='FillupQue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('part1', models.CharField(max_length=100, null=True)),
('part2', models.CharField(max_length=100, null=True)),
('linked', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='linkedfillup', to='listening.Fillup')),
],
),
migrations.CreateModel(
name='MapMatch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('map', models.ImageField(null=True, upload_to='audiomap/')),
('Question_Name', models.CharField(default='Map Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('Question', models.TextField(default='Your questions', max_length=100)),
('audio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audioMap', to='listening.AudioMain')),
],
),
migrations.CreateModel(
name='MapMatchQues',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('part', models.CharField(max_length=100, null=True)),
('linked', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='linkedmap', to='listening.MapMatch')),
],
),
migrations.CreateModel(
name='Matching',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='Matching Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('l1', models.CharField(max_length=100, null=True)),
('l2', models.CharField(max_length=100, null=True)),
('l3', models.CharField(blank=True, max_length=100, null=True)),
('l4', models.CharField(blank=True, max_length=100, null=True)),
('l5', models.CharField(blank=True, max_length=100, null=True)),
('l6', models.CharField(blank=True, max_length=100, null=True)),
('l7', models.CharField(blank=True, max_length=100, null=True)),
('l8', models.CharField(blank=True, max_length=100, null=True)),
('l9', models.CharField(blank=True, max_length=100, null=True)),
('l10', models.CharField(blank=True, max_length=100, null=True)),
('r1', models.CharField(max_length=100, null=True)),
('r2', models.CharField(max_length=100, null=True)),
('r3', models.CharField(blank=True, max_length=100, null=True)),
('r4', models.CharField(blank=True, max_length=100, null=True)),
('r5', models.CharField(blank=True, max_length=100, null=True)),
('r6', models.CharField(blank=True, max_length=100, null=True)),
('r7', models.CharField(blank=True, max_length=100, null=True)),
('r8', models.CharField(blank=True, max_length=100, null=True)),
('r9', models.CharField(blank=True, max_length=100, null=True)),
('r10', models.CharField(blank=True, max_length=100, null=True)),
('audio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audioMatch', to='listening.AudioMain')),
],
),
migrations.CreateModel(
name='MCQ',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='MCQ Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('passage', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audiomcq', to='listening.AudioMain')),
],
),
migrations.CreateModel(
name='MCQQues',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question', models.TextField(default='Your questions', max_length=100)),
('option1', models.CharField(max_length=100, null=True)),
('option2', models.CharField(max_length=100, null=True)),
('option3', models.CharField(blank=True, max_length=100, null=True)),
('option4', models.CharField(blank=True, max_length=100, null=True)),
('option5', models.CharField(blank=True, max_length=100, null=True)),
('option6', models.CharField(blank=True, max_length=100, null=True)),
('linked', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='linkedmcq', to='listening.MCQ')),
],
),
migrations.CreateModel(
name='Summary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Question_Name', models.CharField(default='audio Question', max_length=50)),
('Instruction', models.TextField(default='your instructions', max_length=2000)),
('part1', models.CharField(max_length=100, null=True)),
('part2', models.CharField(max_length=100, null=True)),
('part3', models.CharField(blank=True, max_length=100, null=True)),
('part4', models.CharField(blank=True, max_length=100, null=True)),
('part5', models.CharField(blank=True, max_length=100, null=True)),
('part6', models.CharField(blank=True, max_length=100, null=True)),
('part7', models.CharField(blank=True, max_length=100, null=True)),
('audio', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='audiosumm', to='listening.AudioMain')),
],
),
]
| [
"[email protected]"
] | |
e7d844941964fce550c0dfc36f57e53d74501643 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/liangliangyy_DjangoBlog/DjangoBlog-master/comments/templatetags/comments_tags.py | e23ec803c3847a3a33e438bbaf6b3c38171df187 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 1,676 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: liangliangyy
@license: MIT Licence
@contact: [email protected]
@site: https://www.lylinux.org/
@software: PyCharm
@file: comments_tags.py
@time: 2016/11/2 下午9:17
"""
from django import template
from django.template.loader import render_to_string
from ..models import Comment
from blog.models import Article
from comments.forms import CommentForm
register = template.Library()
@register.assignment_tag
def parse_commenttree(commentlist, comment):
"""获得当前评论子评论的列表
用法: {% parse_commenttree article_comments comment as childcomments %}
"""
datas = []
def parse(c):
childs = commentlist.filter(parent_comment=c)
for child in childs:
datas.append(child)
parse(child)
parse(comment)
return datas
@register.inclusion_tag('comments/tags/comment_item.html')
def show_comment_item(comment, ischild):
"""评论"""
depth = 1 if ischild else 2;
return {
'comment_item': comment,
'depth': depth
}
"""
@register.simple_tag(name='get_comment_count')
def GetCommentCount(parser, token):
commentcount = Comment.objects.filter(article__author_id=token).count()
return "0" if commentcount == 0 else str(commentcount) + " comments"
@register.inclusion_tag('comments/tags/post_comment.html')
def load_post_comment(article, lastform=None):
if not lastform:
form = CommentForm()
form.article_id = article.id
form.parent_comment_id = ''
else:
form = lastform
return {
'article': article,
'form': form
}
"""
| [
"[email protected]"
] | |
fe090bcc2a7b8e2051f8eb6a23f63a8b9527277a | cf7d96bdd34205ede987f0985dfc9e3ab415ee06 | /product_fifo_lifo/account_anglo_saxon.py | 8ac76dbdc2a5b41e43c82722072e70e3a928799c | [] | no_license | hendrasaputra0501/btxjalan | afc93467d54a6f20ef6ac46f7359e964ad5d42a0 | d02bc085ad03efc982460d77f7af1eb5641db729 | refs/heads/master | 2020-12-30T11:02:05.416120 | 2017-07-31T01:34:08 | 2017-07-31T01:34:08 | 98,836,234 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 12,614 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class account_invoice_line(osv.osv):
_inherit = "account.invoice.line"
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
def move_line_get(self, cr, uid, invoice_id, context=None):
res = []
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
if context is None:
context = {}
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
mres = self.move_line_get_item(cr, uid, line, context)
if not mres:
continue
mres['invl_id']=line.id
res.append(mres)
tax_code_found= False
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id,
(line.price_unit * (1.0 - (line['discount'] or 0.0) / 100.0)),
line.quantity, line.product_id,
inv.partner_id)['taxes']:
if inv.type in ('out_invoice', 'in_invoice'):
tax_code_id = tax['base_code_id']
tax_amount = line.price_subtotal * tax['base_sign']
else:
tax_code_id = tax['ref_base_code_id']
tax_amount = line.price_subtotal * tax['ref_base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(self.move_line_get_item(cr, uid, line, context))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, tax_amount, context={'date': inv.date_invoice})
def get_price(cr, uid, inv, company_currency,i_line):
cur_obj = self.pool.get('res.currency')
if inv.currency_id.id != company_currency:
price = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, i_line.product_id.standard_price * i_line.quantity, context={'date': inv.date_invoice})
else:
price = i_line.product_id.standard_price * i_line.quantity
return price
if inv.type in ('out_invoice','out_refund'):
for i_line in inv.invoice_line:
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if inv.type == 'out_invoice':
# debit account dacc will be the output account
# first check the product, if empty check the category
dacc = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
if not dacc:
dacc = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
else:
# = out_refund
# debit account dacc will be the input account
# first check the product, if empty check the category
dacc = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not dacc:
dacc = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
# in both cases the credit account cacc will be the expense account
# first check the product, if empty check the category
cacc = i_line.product_id.property_account_expense and i_line.product_id.property_account_expense.id
if not cacc:
cacc = i_line.product_id.categ_id.property_account_expense_categ and i_line.product_id.categ_id.property_account_expense_categ.id
if dacc and cacc:
res.append({
'type':'src',
'name': i_line.name[:64],
'price_unit':i_line.product_id.standard_price,
'quantity':i_line.quantity,
'price':get_price(cr, uid, inv, company_currency, i_line),
'account_id':dacc,
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
})
res.append({
'type':'src',
'name': i_line.name[:64],
'price_unit':i_line.product_id.standard_price,
'quantity':i_line.quantity,
'price': -1 * get_price(cr, uid, inv, company_currency, i_line),
'account_id':cacc,
'product_id':i_line.product_id.id,
'uos_id':i_line.uos_id.id,
'account_analytic_id': False,
'taxes':i_line.invoice_line_tax_id,
})
elif inv.type in ('in_invoice','in_refund'):
for i_line in inv.invoice_line:
if i_line.product_id and i_line.product_id.valuation == 'real_time':
if i_line.product_id.type != 'service':
# get the price difference account at the product
acc = i_line.product_id.property_account_creditor_price_difference and i_line.product_id.property_account_creditor_price_difference.id
if not acc:
# if not found on the product get the price difference account at the category
acc = i_line.product_id.categ_id.property_account_creditor_price_difference_categ and i_line.product_id.categ_id.property_account_creditor_price_difference_categ.id
a = None
if inv.type == 'in_invoice':
# oa will be the stock input account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_input and i_line.product_id.property_stock_account_input.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_input_categ and i_line.product_id.categ_id.property_stock_account_input_categ.id
else:
# = in_refund
# oa will be the stock output account
# first check the product, if empty check the category
oa = i_line.product_id.property_stock_account_output and i_line.product_id.property_stock_account_output.id
if not oa:
oa = i_line.product_id.categ_id.property_stock_account_output_categ and i_line.product_id.categ_id.property_stock_account_output_categ.id
if oa:
# get the fiscal position
fpos = i_line.invoice_id.fiscal_position or False
a = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, oa)
diff_res = []
# calculate and write down the possible price difference between invoice price and product price
for line in res:
if a == line['account_id'] and i_line.product_id.id == line['product_id']:
uom = i_line.product_id.uos_id or i_line.product_id.uom_id
valuation_price_unit = self.pool.get('product.uom')._compute_price(cr, uid, uom.id, i_line.product_id.standard_price, i_line.uos_id.id)
if i_line.product_id.cost_method != 'standard' and i_line.purchase_line_id:
#for average/fifo/lifo costing method, fetch real cost price from incomming moves
stock_move_obj = self.pool.get('stock.move')
valuation_stock_move = stock_move_obj.search(cr, uid, [('purchase_line_id', '=', i_line.purchase_line_id.id)], limit=1, context=context)
if valuation_stock_move:
valuation_price_unit = stock_move_obj.browse(cr, uid, valuation_stock_move[0], context=context).price_unit
if valuation_price_unit != i_line.price_unit and line['price_unit'] == i_line.price_unit and acc:
price_diff = i_line.price_unit - valuation_price_unit
line.update({'price': valuation_price_unit * line['quantity']})
diff_res.append({
'type':'src',
'name': i_line.name[:64],
'price_unit':price_diff,
'quantity':line['quantity'],
'price': price_diff * line['quantity'],
'account_id':acc,
'product_id':line['product_id'],
'uos_id':line['uos_id'],
'account_analytic_id':line['account_analytic_id'],
'taxes':line.get('taxes',[]),
})
res += diff_res
return res
class purchase_order(osv.osv):
_inherit = "purchase.order"
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
'purchase_line_id': order_line.id,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
1bafc04c82fcf2e419a27d47e758c586dc6d95cc | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/compose/2017/12/testcases.py | 9427f3d0dad0bc782d0c9d9d7f167207a3c1d6f0 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 5,683 | py | from __future__ import absolute_import
from __future__ import unicode_literals
import functools
import os
import pytest
from docker.errors import APIError
from docker.utils import version_lt
from .. import unittest
from compose.cli.docker_client import docker_client
from compose.config.config import resolve_environment
from compose.config.environment import Environment
from compose.const import API_VERSIONS
from compose.const import COMPOSEFILE_V1 as V1
from compose.const import COMPOSEFILE_V2_0 as V2_0
from compose.const import COMPOSEFILE_V2_0 as V2_1
from compose.const import COMPOSEFILE_V2_2 as V2_2
from compose.const import COMPOSEFILE_V2_3 as V2_3
from compose.const import COMPOSEFILE_V3_0 as V3_0
from compose.const import COMPOSEFILE_V3_2 as V3_2
from compose.const import COMPOSEFILE_V3_5 as V3_5
from compose.const import LABEL_PROJECT
from compose.progress_stream import stream_output
from compose.service import Service
SWARM_SKIP_CONTAINERS_ALL = os.environ.get('SWARM_SKIP_CONTAINERS_ALL', '0') != '0'
SWARM_SKIP_CPU_SHARES = os.environ.get('SWARM_SKIP_CPU_SHARES', '0') != '0'
SWARM_SKIP_RM_VOLUMES = os.environ.get('SWARM_SKIP_RM_VOLUMES', '0') != '0'
SWARM_ASSUME_MULTINODE = os.environ.get('SWARM_ASSUME_MULTINODE', '0') != '0'
def pull_busybox(client):
client.pull('busybox:latest', stream=False)
def get_links(container):
links = container.get('HostConfig.Links') or []
def format_link(link):
_, alias = link.split(':')
return alias.split('/')[-1]
return [format_link(link) for link in links]
def engine_max_version():
if 'DOCKER_VERSION' not in os.environ:
return V3_5
version = os.environ['DOCKER_VERSION'].partition('-')[0]
if version_lt(version, '1.10'):
return V1
if version_lt(version, '1.12'):
return V2_0
if version_lt(version, '1.13'):
return V2_1
if version_lt(version, '17.06'):
return V3_2
return V3_5
def min_version_skip(version):
return pytest.mark.skipif(
engine_max_version() < version,
reason="Engine version %s is too low" % version
)
def v2_only():
return min_version_skip(V2_0)
def v2_1_only():
return min_version_skip(V2_1)
def v2_2_only():
return min_version_skip(V2_2)
def v2_3_only():
return min_version_skip(V2_3)
def v3_only():
return min_version_skip(V3_0)
class DockerClientTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
version = API_VERSIONS[engine_max_version()]
cls.client = docker_client(Environment(), version)
@classmethod
def tearDownClass(cls):
del cls.client
def tearDown(self):
for c in self.client.containers(
all=True,
filters={'label': '%s=composetest' % LABEL_PROJECT}):
self.client.remove_container(c['Id'], force=True)
for i in self.client.images(
filters={'label': 'com.docker.compose.test_image'}):
try:
self.client.remove_image(i, force=True)
except APIError as e:
if e.is_server_error():
pass
volumes = self.client.volumes().get('Volumes') or []
for v in volumes:
if 'composetest_' in v['Name']:
self.client.remove_volume(v['Name'])
networks = self.client.networks()
for n in networks:
if 'composetest_' in n['Name']:
self.client.remove_network(n['Name'])
def create_service(self, name, **kwargs):
if 'image' not in kwargs and 'build' not in kwargs:
kwargs['image'] = 'busybox:latest'
if 'command' not in kwargs:
kwargs['command'] = ["top"]
kwargs['environment'] = resolve_environment(
kwargs, Environment.from_env_file(None)
)
labels = dict(kwargs.setdefault('labels', {}))
labels['com.docker.compose.test-name'] = self.id()
return Service(name, client=self.client, project='composetest', **kwargs)
def check_build(self, *args, **kwargs):
kwargs.setdefault('rm', True)
build_output = self.client.build(*args, **kwargs)
stream_output(build_output, open('/dev/null', 'w'))
def require_api_version(self, minimum):
api_version = self.client.version()['ApiVersion']
if version_lt(api_version, minimum):
pytest.skip("API version is too low ({} < {})".format(api_version, minimum))
def get_volume_data(self, volume_name):
if not is_cluster(self.client):
return self.client.inspect_volume(volume_name)
volumes = self.client.volumes(filters={'name': volume_name})['Volumes']
assert len(volumes) > 0
return self.client.inspect_volume(volumes[0]['Name'])
def is_cluster(client):
if SWARM_ASSUME_MULTINODE:
return True
def get_nodes_number():
try:
return len(client.nodes())
except APIError:
# If the Engine is not part of a Swarm, the SDK will raise
# an APIError
return 0
if not hasattr(is_cluster, 'nodes') or is_cluster.nodes is None:
# Only make the API call if the value hasn't been cached yet
is_cluster.nodes = get_nodes_number()
return is_cluster.nodes > 1
def no_cluster(reason):
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if is_cluster(self.client):
pytest.skip("Test will not be run in cluster mode: %s" % reason)
return
return f(self, *args, **kwargs)
return wrapper
return decorator
| [
"[email protected]"
] | |
bc76b4c9093b72508244a0815238c338afd49775 | 7dc65b6d2e857c807bd2f75e2586af5f8e933fe5 | /tcutils/pkgs/Traffic/traffic/core/stream.py | 69e5a78e7c8813a72c1172ca57325b026cbe268e | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | vkolli/contrail-test-perf | d6fdc20f4a2004066c5a6316afd915ecdc9366c2 | db04b8924a2c330baabe3059788b149d957a7d67 | refs/heads/master | 2021-01-18T15:36:18.120487 | 2017-03-30T19:19:30 | 2017-03-30T19:19:30 | 86,661,522 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,933 | py | """Module to create traffic stream.
It just parses the arguments given by the user and fills up the approprite
protocol header.
This needs to be extended for new protocol streams with new protocol.
"""
import sys
import inspect
try:
# Running from the source repo "test".
from tcutils.pkgs.Traffic.traffic.utils.logger import LOGGER, get_logger
from tcutils.pkgs.Traffic.traffic.utils.globalvars import LOG_LEVEL
from tcutils.pkgs.Traffic.traffic.utils.util import is_v6, is_v4
except ImportError:
# Distributed and installed as package
from traffic.utils.logger import LOGGER, get_logger
from traffic.utils.globalvars import LOG_LEVEL
from traffic.utils.util import is_v6, is_v4
LOGGER = "%s.core.listener" % LOGGER
log = get_logger(name=LOGGER, level=LOG_LEVEL)
def help(header="all"):
"""lists the keywords of fields available in currenlty implemented
protocols.
This is a helper method to the users to get the list of fields,
before creating a stream.
Usage:
import stream
stream.help()
stream.help("IPHeader")
"""
clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
if not header == "all":
clsmembers = filter(lambda x: x[0] == header, clsmembers)
for clsname, clsmember in clsmembers:
clsobj = clsmember()
clsattrs = dir(clsobj)
if "fields" in clsattrs:
print clsname, ": ", clsobj.fields
if "options" in clsattrs:
print clsname, ": ", clsobj.options
class Stream(object):
def __init__(self, **kwargs):
if not kwargs:
# Just for getting Help.
return
self.all_fields = kwargs
try:
self.protocol = self.all_fields['protocol']
except KeyError:
self.protocol = "ip" # Defualt L3 protocol.
dst = self.all_fields['dst']
if is_v6(dst):
self.protocol = "ipv6"
try:
proto = self.all_fields['proto']
except KeyError, err:
print err, "Must specify proto."
if 'dst' in self.all_fields.keys():
self.all_fields['dst'] = str(self.all_fields['dst'])
self.l2 = self._eth_header()
if self.protocol == 'ip':
self.l3 = self._ip_header()
elif self.protocol == 'ipv6':
self.l3 = self._ip6_header()
if proto == 'tcp':
self.l4 = self._tcp_header()
elif proto == 'udp':
self.l4 = self._udp_header()
elif proto == 'icmp':
self.l4 = self._icmp_header()
def _eth_header(self):
return {}
def _ip_header(self):
return IPHeader(**self.all_fields).get_header()
def _ip6_header(self):
return IP6Header(**self.all_fields).get_header()
def _tcp_header(self):
return TCPHeader(**self.all_fields).get_header()
def _udp_header(self):
return UDPHeader(**self.all_fields).get_header()
def _icmp_header(self):
if self.protocol == 'ipv6':
return None
return ICMPHeader(**self.all_fields).get_header()
def get_l4_proto(self):
return getattr(self.l3, 'proto', None) or \
getattr(self.l3, 'nh', None).lower()
class Header(object):
def __init__(self, fields={}):
for key, val in fields.items():
self.__setattr__(key, val)
class AnyHeader(object):
def __init__(self, **kwargs):
self.all_fields = kwargs
try:
self.all_fields.update({'sport': int(self.all_fields['sport'])})
self.all_fields.update({'dport': int(self.all_fields['dport'])})
self.all_fields.update({'inter': int(self.all_fields['inter'])})
except KeyError:
pass
def create_header(self, fields):
header = {}
for field in fields:
if field in self.all_fields.keys():
if field == "iplen": # UDP also has len
field = "len"
if field == "ipflags": # TCP also has flags
field = "flags"
header.update({field: self.all_fields[field]})
return header
class TCPHeader(AnyHeader):
def __init__(self, **kwargs):
super(TCPHeader, self).__init__(**kwargs)
# Set got from "fields_desc" attribute of protocol headers in scapy.
self.fields = ("sport", "dport", "seq", "ack", "dataofs", "reserved",
"flags", "window", "chksum", "urgptr")
self.options = ("EOL", "NOP", "MSS", "WScale", "SAckOK", "SAck",
"Timestamp", "AltChkSum", "AltChkSumOpt")
def get_header(self):
header = self.create_header(self.fields)
options = self.create_header(self.options)
if options:
header.update({'options': options})
return Header(header)
class UDPHeader(AnyHeader):
def __init__(self, **kwargs):
super(UDPHeader, self).__init__(**kwargs)
# Set got from "fields_desc" attribute of protocol headers in scapy.
self.fields = ("sport", "dport", "len", "chksum")
def get_header(self):
header = self.create_header(self.fields)
return Header(header)
class ICMPHeader(AnyHeader):
def __init__(self, **kwargs):
super(ICMPHeader, self).__init__(**kwargs)
# Set got from "fields_desc" attribute of protocol headers in scapy.
self.fields = ("type", "code", "chksum", "id", "seq", "ts_ori", "ts_rx"
"ts_tx", "gw", "ptr", "reserved", "addr_mask")
def get_header(self):
header = self.create_header(self.fields)
return Header(header)
class IPHeader(AnyHeader):
def __init__(self, **kwargs):
super(IPHeader, self).__init__(**kwargs)
# Set got from "fields_desc" attribute of protocol headers in scapy.
self.fields = ("version", "ihl", "tos", "iplen", "id", "ipflags",
"frag", "ttl", "proto", "ipchksum", "src", "dst",
"options")
def get_header(self):
header = self.create_header(self.fields)
return Header(header)
class IP6Header(AnyHeader):
def __init__(self, **kwargs):
super(IP6Header, self).__init__(**kwargs)
# Set got from "fields_desc" attribute of protocol headers in scapy.
self.fields = ("version", "tc", "fl", "iplen", "nh", "proto",
"hlim", "ttl", "src", "dst")
def get_header(self):
header = self.create_header(self.fields)
hdr_obj = Header(header)
if hasattr(hdr_obj, 'proto'):
hdr_obj.nh = hdr_obj.proto.upper()
if 'ICMP' in hdr_obj.nh:
hdr_obj.nh = 'ICMPv6'
del hdr_obj.proto
if hasattr(hdr_obj, 'ttl'):
hdr_obj.hlim = hdr_obj.ttl
del hdr_obj.ttl
return hdr_obj
| [
"[email protected]"
] | |
d32b3980d348ba60a4b957a7225a211d4f803884 | 08615c64a62fc364a802bb92314cf49080ddbcee | /new-day02/07.对象/04.类型的判断.py | e91be84d6a4c82303ee691ee3e998464c7d94134 | [] | no_license | xiangys0134/python_study | afc4591fca1db6ebddf83f0604e35ed2ef614728 | 6ec627af7923b9fd94d244c561297ccbff90c1e9 | refs/heads/master | 2023-02-24T01:24:45.734510 | 2022-10-29T02:11:20 | 2022-10-29T02:11:20 | 143,358,792 | 2 | 0 | null | 2023-02-08T03:07:26 | 2018-08-03T00:43:46 | Python | UTF-8 | Python | false | false | 198 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
class A:
pass
class B(A):
pass
# print(issubclass(B,A))
a = A()
b = B()
print(isinstance(a,A))
print(isinstance(b,A))
print(isinstance(a,B)) | [
"[email protected]"
] | |
765bf542a781b1ad6e4337b542b13342b17bbcb6 | 374b3f27fe3cf032e88eccac5992c83eba0ad1b2 | /tutorials/W3D5_NetworkCausality/solutions/W3D5_Tutorial4_Solution_431a3d57.py | 9719ed5491a6af4bb92107ac81919cd72f403124 | [
"CC-BY-4.0",
"BSD-3-Clause"
] | permissive | NeuromatchAcademy/course-content | e2fdca96bcbdc78afaa209e4e77438f44a56c82d | 3d638d00f02d9fd269fa2aff7d062558afdcb126 | refs/heads/main | 2023-08-16T16:09:09.314153 | 2023-08-02T06:21:49 | 2023-08-02T06:21:49 | 262,856,980 | 2,678 | 1,079 | CC-BY-4.0 | 2023-08-17T00:32:24 | 2020-05-10T19:09:05 | Jupyter Notebook | UTF-8 | Python | false | false | 909 | py | def fit_first_stage(T, Z):
"""
Estimates T_hat as the first stage of a two-stage least squares.
Args:
T (np.ndarray): our observed, possibly confounded, treatment of shape (n, 1)
Z (np.ndarray): our observed instruments of shape (n, 1)
Returns
T_hat (np.ndarray): our estimate of the unconfounded portion of T
"""
# Initialize linear regression model
stage1 = LinearRegression(fit_intercept=True)
# Fit linear regression model
stage1.fit(Z, T)
# Predict T_hat using linear regression model
T_hat = stage1.predict(Z)
return T_hat
# Estimate T_hat
T_hat = fit_first_stage(T, Z)
# Get correlations
T_C_corr = np.corrcoef(T.transpose(), C.transpose())[0, 1]
T_hat_C_corr = np.corrcoef(T_hat.transpose(), C.transpose())[0, 1]
# Print correlations
print(f"Correlation between T and C: {T_C_corr:.3f}")
print(f"Correlation between T_hat and C: {T_hat_C_corr:.3f}") | [
"[email protected]"
] | |
bfb31af3eba9876a04ed0655b16df30fb0c8340b | 3e6d3e9585c24cb0c29616778ad2e304241a3a65 | /dockerhub_show_tags.py | 52febfd188001ee975c11201c830a2941ba06f89 | [] | no_license | blueroutecn/pytools | 442ef301f5a1b1ef5ce1a22dfe3027873b262934 | 64cb4f6e253bc1f08491874a0db9bcea2ae3dcfd | refs/heads/master | 2021-01-19T14:52:14.886782 | 2017-08-20T21:28:07 | 2017-08-20T21:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,675 | py | #!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2016-05-10 11:26:49 +0100 (Tue, 10 May 2016)
#
# https://github.com/harisekhon/pytools
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn
# and optionally send me feedback to help improve this or other code I publish
#
# https://www.linkedin.com/in/harisekhon
#
"""
Tool to show Docker tags for one or more DockerHub repos
Written for convenience as Docker CLI doesn't currently support this:
See https://github.com/docker/docker/issues/17238
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#from __future__ import unicode_literals
import json
import logging
import os
import sys
import traceback
import urllib
try:
import requests
except ImportError:
print(traceback.format_exc(), end='')
sys.exit(4)
srcdir = os.path.abspath(os.path.dirname(__file__))
libdir = os.path.join(srcdir, 'pylib')
sys.path.append(libdir)
try:
# pylint: disable=wrong-import-position
from harisekhon.utils import log, die, prog, isJson, jsonpp
from harisekhon import CLI
except ImportError as _:
print(traceback.format_exc(), end='')
sys.exit(4)
__author__ = 'Hari Sekhon'
__version__ = '0.4'
class DockerHubTags(CLI):
def __init__(self):
# Python 2.x
super(DockerHubTags, self).__init__()
# Python 3.x
# super().__init__()
self._CLI__parser.usage = '{0} [options] repo1 repo2 ...'.format(prog)
self.quiet = False
self.timeout_default = 30
def add_options(self):
self.add_opt('-q', '--quiet', action='store_true', default=False,
help='Output only the tags, one per line (useful for shell tricks)')
def run(self):
if not self.args:
self.usage('no repos given as args')
self.quiet = self.get_opt('quiet')
if not self.quiet:
print('\nDockerHub\n')
for arg in self.args:
self.print_tags(arg)
def print_tags(self, repo):
if not self.quiet:
print('repo: {0}'.format(repo))
print('tags: ', end='')
sys.stdout.flush()
indent = ' '
if self.quiet:
indent = ''
print('\n{0}'.format(indent).join(self.get_tags(repo)))
if not self.quiet:
print()
def get_tags(self, repo):
namespace = 'library'
if '/' in repo:
(namespace, repo) = repo.split('/', 2)
# there is another endpoint but it requires authentication
url = 'https://registry.hub.docker.com/v2/repositories/{0}/{1}/tags/'\
.format(urllib.quote_plus(namespace), urllib.quote_plus(repo))
tag_list = []
while True:
(tags, url) = self.query(url)
tag_list += tags
if not url:
break
tag_list.sort()
# put latest to the top of the list
try:
tag_list.insert(0, tag_list.pop(tag_list.index('latest')))
except ValueError:
pass
return tag_list
@staticmethod
def query(url):
log.debug('GET %s' % url)
try:
verify = True
# workaround for Travis CI and older pythons - we're not exchanging secret data so this is ok
#if os.getenv('TRAVIS'):
# verify = False
req = requests.get(url, verify=verify)
except requests.exceptions.RequestException as _:
die(_)
log.debug("response: %s %s", req.status_code, req.reason)
log.debug("content:\n%s\n%s\n%s", '='*80, req.content.strip(), '='*80)
if req.status_code != 200:
die("%s %s" % (req.status_code, req.reason))
if not isJson(req.content):
die('invalid non-JSON response from DockerHub!')
if log.isEnabledFor(logging.DEBUG):
print(jsonpp(req.content))
print('='*80)
tag_list = []
try:
j = json.loads(req.content)
tag_list = [_['name'] for _ in j['results']]
# could perhaps stack overflow in some scenario
# not as functional programming 'cool' but will do own tail recursion and just while loop instead
#if 'next' in j and j['next']:
# tag_list += self.query(j['next'])
return (tag_list, j['next'])
except KeyError as _:
die('failed to parse output from DockerHub (format may have changed?): {0}'.format(_))
if __name__ == '__main__':
DockerHubTags().main()
| [
"[email protected]"
] | |
e3db1b132f2bd7bbf515311c73de37ae081a5770 | f8580d2c963b6a3c34e918e0743d0a503a9584bd | /etg/unfinished/choicebk.py | 3ed5657f87283336a95d87df56beb9f6b2922919 | [] | no_license | pypy/wxpython-cffi | f59c3faeed26e6a26d0c87f4f659f93e5366af28 | 877b7e6c1b5880517456f1960db370e4bb7f5c90 | refs/heads/master | 2023-07-08T21:13:22.765786 | 2016-12-02T22:10:45 | 2016-12-02T22:10:45 | 397,124,697 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,023 | py | #---------------------------------------------------------------------------
# Name: etg/choicebk.py
# Author: Robin Dunn
#
# Created: 18-Jun-2012
# Copyright: (c) 2013 by Total Control Software
# License: wxWindows License
#---------------------------------------------------------------------------
import etgtools
import etgtools.tweaker_tools as tools
PACKAGE = "wx"
MODULE = "_core"
NAME = "choicebk" # Base name of the file to generate to for this script
DOCSTRING = ""
# The classes and/or the basename of the Doxygen XML files to be processed by
# this script.
ITEMS = [ "wxChoicebook",
]
#---------------------------------------------------------------------------
def run():
# Parse the XML file(s) building a collection of Extractor objects
module = etgtools.ModuleDef(PACKAGE, MODULE, NAME, DOCSTRING)
etgtools.parseDoxyXML(module, ITEMS)
#-----------------------------------------------------------------
# Tweak the parsed meta objects in the module object as needed for
# customizing the generated code and docstrings.
module.addHeaderCode('#include <wx/choicebk.h>')
c = module.find('wxChoicebook')
assert isinstance(c, etgtools.ClassDef)
tools.fixWindowClass(c)
tools.fixBookctrlClass(c)
module.addPyCode("""\
EVT_CHOICEBOOK_PAGE_CHANGED = wx.PyEventBinder( wxEVT_CHOICEBOOK_PAGE_CHANGED, 1 )
EVT_CHOICEBOOK_PAGE_CHANGING = wx.PyEventBinder( wxEVT_CHOICEBOOK_PAGE_CHANGING, 1 )
# deprecated wxEVT aliases
wxEVT_COMMAND_CHOICEBOOK_PAGE_CHANGED = wxEVT_CHOICEBOOK_PAGE_CHANGED
wxEVT_COMMAND_CHOICEBOOK_PAGE_CHANGING = wxEVT_CHOICEBOOK_PAGE_CHANGING
""")
#-----------------------------------------------------------------
tools.doCommonTweaks(module)
tools.runGenerators(module)
#---------------------------------------------------------------------------
if __name__ == '__main__':
run()
| [
"[email protected]"
] | |
81e6205099f506d87d51c23755b296cd247d02f8 | 2cafc4981f85e9a25cceb18af1e936e19268e0ee | /scapy_icmp_discovery.py | 1121df8df3afe58afbfc56b36a3501956c738cca | [] | no_license | lapinrepository/ethicalhacking | fdd0647bffeb87544ede182eb62544ee922579fd | 14fac0bee8ca5f58c5499e4e91323e005a5e6c25 | refs/heads/master | 2021-10-09T15:14:29.976534 | 2018-12-30T09:30:19 | 2018-12-30T09:30:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | #!/usr/bin/python
import logging
import subprocess
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from scapy.all import *
import threading
screenlock = threading.Semaphore(value=1)
def icmpscan(prefix, addr):
try:
answer = sr1(IP(dst=prefix + str(addr))/ICMP(),timeout=1, verbose=0)
screenlock.acquire()
if answer == None:
pass
else:
print("[+] Host " + prefix + str(addr) + " is alive")
except:
pass
finally:
screenlock.release()
if len(sys.argv) != 2:
print("Usage scapy_icmp_discovery.py [interface]")
print("Example: scapy_icmp_discovery.py eth0")
sys.exit()
interface = str(sys.argv[1])
ip = subprocess.check_output("ifconfig " + interface + " | grep 'inet' | cut -d ' ' -f 1 | cut -d 'n' -f 2 | cut -d ' ' -f 2", shell=True).strip()
prefix = ip.split('.')[0] + '.' + ip.split('.')[1] + '.' + ip.split('.')[2] + '.'
reply_ip = list()
for addr in range(0, 254):
t = threading.Thread(target = icmpscan, args=(prefix, addr))
t.start()
#for addr in range(0,254):#
# answer = sr1(IP(dst=prefix + str(addr)) / ICMP(), timeout=1, verbose=0)
# if answer == None:
# pass
# else:
# reply_ip.append(prefix + str(addr))
#
#for elt in reply_ip:
# print(elt)
| [
"[email protected]"
] | |
b67e84a4c5fdfc6733b4df7afb9f05a1515a26f9 | 86d9b76692c9fecd26ca3a5bd358119b2f3697dd | /set/change-set.py | e3e2f23ea9cd5507d864419915c48b61241c174d | [] | no_license | khanhnt99/Pythonbook | ec7fa2ff3495ba07e838c9e910aa0758e8f7ae94 | 850e259e6293413e951c77e448d95309dd4f4fad | refs/heads/master | 2022-12-19T09:14:06.193040 | 2020-09-23T10:11:24 | 2020-09-23T10:11:24 | 288,210,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 218 | py | '''
set la tap hop cac phan tu khong co thu tu nen index khong co y nghia'
'''
my_set={1,3}
print(my_set)
my_set.add(2)
print(my_set)
my_set.update([2,3,4])
print(my_set)
my_set.update([4,5],{1,6,8})
print(my_set)
| [
"[email protected]"
] | |
6a021d69064c81144ccb9992145eba87d4ad725f | 7fd7b2de2e1d2317660b7085e3984214ab3f4c9d | /organice/bin/organice_setup.py | 5787ac48176901ebcd5fd3a47da3b9cd26f92108 | [
"Apache-2.0"
] | permissive | brndna/django-organice | 12ae2e1ce1d1f2b79d4ef7932729269c5996a8f5 | 0bea02e0025073ba3e9645b42cff951943400df1 | refs/heads/master | 2020-12-11T06:07:05.363812 | 2015-09-29T20:36:49 | 2015-09-29T21:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,702 | py | #!/usr/bin/env python
#
# Copyright 2014-2015 Peter Bittner <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Setup script for starting a django Organice project.
"""
from organice.management.settings import DjangoModuleManager, DjangoSettingsManager
from argparse import ArgumentParser
from stat import S_IRUSR, S_IWUSR, S_IXUSR, S_IRGRP, S_IXGRP, S_IROTH, S_IXOTH
from subprocess import call
import django.conf
import django.template
import errno
import os
# global variables (a class with members would be too verbose) *nirg*
args = None
profiles = None
projectname = None
settings = None
def safe_delete(filename):
"""
Make a best-effort delete without raising an exception when file didn't
exist (no race condition). All other errors raise their usual exception.
"""
try:
os.remove(filename)
except OSError as e:
if e.errno != errno.ENOENT: # no such file
raise # re-raise exception for any other error
def safe_rename(source, target):
"""
Perform a forced rename of a file, overwriting an existing target.
If the source file doesn't exist the target is simply deleted.
"""
safe_delete(target)
try:
os.rename(source, target)
except OSError as e:
if e.errno != errno.ENOENT: # no such file
raise # re-raise exception for any other error
def adding_settings_for(section):
"""Simple helper for progress printouts"""
return 'Adding settings for %s ...' % section
def _print_verbose(vlevel, message):
"""Print text to stdout in accordance with user-specified verbosity level."""
global args
if args.verbosity >= vlevel:
print(message)
def _evaluate_command_line():
global projectname
global args
usage_descr = 'django Organice setup. Start getting organiced! ' \
'Your collaboration platform starts here.'
help_account = 'Organice account name used as subdomain (default: projectname)'
help_domain = 'optional domain name to enforce'
help_engine = 'database engine (for profiles: staging, production)'
help_database = 'database name (for profiles: staging, production)'
help_username = 'database user (for profiles: staging, production)'
help_password = 'database password (for profiles: staging, production)'
help_manage = 'use default single manage.py or use multi-settings variant (default: %(default)s)'
help_webserver = 'create appropriate web server configuration (default: %(default)s)'
help_set = 'set the value of a settings variable in a destination file (this option can be used several times)'
help_verbosity = 'Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'
parser = ArgumentParser(description=usage_descr)
parser.add_argument('projectname', help='name of project to create')
parser.add_argument('--account', help=help_account)
parser.add_argument('--domain', help=help_domain)
parser.add_argument('--engine', choices=['postgresql_psycopg2', 'mysql', 'oracle'], help=help_engine)
parser.add_argument('--database', help=help_database)
parser.add_argument('--username', help=help_username)
parser.add_argument('--password', help=help_password)
parser.add_argument('--manage', choices=['single', 'multi'], default='single', help=help_manage)
parser.add_argument('--webserver', choices=['apache', 'lighttp'], default='apache', help=help_webserver)
parser.add_argument('--set', help=help_set, nargs=3, metavar=('dest', 'var', 'value'), action='append')
parser.add_argument('--verbosity', '-v', type=int, choices=range(4), default=3, help=help_verbosity)
args = parser.parse_args()
projectname = args.projectname
def _create_project():
global args
global projectname
manage_script_name = 'manage.py'
manage_delete_name = 'manage.py~deleted'
mode0755 = S_IRUSR | S_IWUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH
if args.manage == 'multi':
if os.path.isfile(manage_script_name):
_print_verbose(2, 'Deleting manage.py to allow multi-settings platform setup ...')
safe_rename(manage_script_name, manage_delete_name)
_print_verbose(2, 'Generating project %s ...' % projectname)
code = call(['django-admin.py', 'startproject', projectname, '.'])
if code != 0:
if args.manage == 'multi':
_print_verbose(1, 'Restoring original manage.py ...')
safe_rename(manage_delete_name, manage_script_name)
raise SystemExit(code)
os.chmod(manage_script_name, mode0755)
if args.manage == 'multi':
safe_delete(manage_delete_name)
_print_verbose(2, 'Removing project specific configuration from manage.py ...')
with open(manage_script_name, 'a+') as f:
lines = f.readlines()
f.seek(0)
f.truncate()
for line in lines:
if 'import os' not in line and 'DJANGO_SETTINGS_MODULE' not in line:
f.write(line)
def _split_project():
global args
global profiles
global projectname
global settings
profiles = ('develop', 'staging', 'production')
filenames = ('__init__', 'common') + profiles
_print_verbose(2, 'Creating directories ...')
os.mkdir('%s.media' % projectname)
os.mkdir('%s.static' % projectname)
os.mkdir('%s.templates' % projectname)
os.mkdir(os.path.join(projectname, 'settings'))
_print_verbose(2, 'Converting settings to deployment profiles (%s) ...' % ', '.join(profiles))
os.rename(os.path.join(projectname, 'settings.py'),
os.path.join(projectname, 'settings', 'common.py'))
settings = DjangoSettingsManager(projectname, *filenames)
settings.append_lines('__init__',
'"""',
'Modularized settings generated by django Organice setup. http://organice.io',
'This solution follows the second recommendation from',
'http://www.sparklewise.com/django-settings-for-production-and-development-best-practices/',
'"""',
'from .develop import * # noqa')
for prof in profiles:
settings.append_lines(prof,
'# Django project settings for %s environment' % prof.capitalize(),
'',
'from .common import * # noqa')
# out-of-the-box Django values relevant for deployment
settings.delete_var('common', 'SITE_ID')
settings.insert_lines('common',
'_ = lambda s: s',
'',
'SITE_ID = 1')
settings.replace_line('common', 'import os', 'from os.path import abspath, dirname, join')
settings.set_value('common', 'BASE_DIR', 'dirname(dirname(dirname(abspath(__file__))))')
settings.set_value('common', 'MEDIA_ROOT', "join(BASE_DIR, '%s.media')" % projectname)
settings.set_value('common', 'STATIC_ROOT', "join(BASE_DIR, '%s.static')" % projectname)
settings.set_value('common', 'MEDIA_URL', "'/media/'")
settings.set_value('common', 'USE_I18N', False)
settings.move_var('common', profiles, 'DEBUG')
settings.move_var('common', profiles, 'ALLOWED_HOSTS')
settings.move_var('common', profiles, 'DATABASES')
settings.move_var('common', profiles, 'MEDIA_ROOT')
settings.move_var('common', profiles, 'STATIC_ROOT')
settings.move_var('common', profiles, 'SECRET_KEY')
for prof in ('staging', 'production'):
settings.set_value(prof, 'DEBUG', False)
settings.set_value_lines(prof, 'ALLOWED_HOSTS', '[',
" '%s.organice.io'," % (args.account if args.account else projectname),
" '%s'," % (args.domain if args.domain else 'www.example.com'),
']')
def _configure_database():
global args
global projectname
global settings
db_template = django.template.Template("""{
'default': {
'ENGINE': 'django.db.backends.{{ engine }}', # 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': {{ database|safe }}, # path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '{{ username|safe }}',
'PASSWORD': '{{ password|safe }}',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}""")
db_context = django.template.Context({
'engine': 'sqlite3',
'database': "join(BASE_DIR, '%s.sqlite')" % projectname,
'username': '',
'password': '',
})
_print_verbose(2, 'Configuring database for all profiles ...')
settings.set_value('develop', 'DATABASES', db_template.render(db_context))
db_context['engine'] = args.engine if args.engine else ''
db_context['database'] = "'%s'" % (args.database if args.database else '')
db_context['username'] = args.username if args.username else ''
db_context['password'] = args.password if args.password else ''
for prof in ('staging', 'production'):
settings.set_value(prof, 'DATABASES', db_template.render(db_context))
def _configure_installed_apps():
global settings
_print_verbose(2, adding_settings_for('installed apps'))
settings.delete_var('common', 'INSTALLED_APPS')
settings.append_lines('common',
'INSTALLED_APPS = (',
" # 'organice_theme_add-your-theme-here',",
" 'organice_theme',",
" 'organice',",
" 'cms',",
" # 'mptt',",
" 'menus',",
" 'sekizai',",
" 'treebeard',",
" 'easy_thumbnails',",
" 'djangocms_admin_style',",
" 'djangocms_file',",
" 'djangocms_flash',",
" 'djangocms_googlemap',",
" 'djangocms_inherit',",
" 'djangocms_link',",
" 'djangocms_picture',",
" 'djangocms_teaser',",
" 'djangocms_video',",
" 'django_comments',",
" 'django.contrib.auth',",
" 'django.contrib.contenttypes',",
" 'django.contrib.sessions',",
" 'django.contrib.sites',",
" 'django.contrib.messages',",
" 'django.contrib.staticfiles',",
" 'django.contrib.admin',",
" # 'media_tree',",
" # 'media_tree.contrib.cms_plugins.media_tree_image',",
" # 'media_tree.contrib.cms_plugins.media_tree_gallery',",
" # 'media_tree.contrib.cms_plugins.media_tree_slideshow',",
" # 'media_tree.contrib.cms_plugins.media_tree_listing',",
" # 'form_designer.contrib.cms_plugins.form_designer_form',",
" 'cmsplugin_zinnia',",
" 'tagging',",
" 'todo',",
" # 'emencia.django.newsletter',",
" 'tinymce',",
" 'simple_links',",
" 'zinnia',",
" 'allauth',",
" 'allauth.account',",
" 'allauth.socialaccount',",
" 'allauth.socialaccount.providers.amazon',",
" # 'allauth.socialaccount.providers.angellist',",
" 'allauth.socialaccount.providers.bitbucket',",
" 'allauth.socialaccount.providers.bitly',",
" 'allauth.socialaccount.providers.dropbox',",
" # 'allauth.socialaccount.providers.facebook',",
" # 'allauth.socialaccount.providers.flickr',",
" # 'allauth.socialaccount.providers.feedly',",
" 'allauth.socialaccount.providers.github',",
" 'allauth.socialaccount.providers.google',",
" 'allauth.socialaccount.providers.instagram',",
" # 'allauth.socialaccount.providers.linkedin',",
" 'allauth.socialaccount.providers.linkedin_oauth2',",
" # 'allauth.socialaccount.providers.openid',",
" # 'allauth.socialaccount.providers.persona',",
" 'allauth.socialaccount.providers.soundcloud',",
" 'allauth.socialaccount.providers.stackexchange',",
" # 'allauth.socialaccount.providers.tumblr',",
" # 'allauth.socialaccount.providers.twitch',",
" # 'allauth.socialaccount.providers.twitter',",
" 'allauth.socialaccount.providers.vimeo',",
" # 'allauth.socialaccount.providers.vk',",
" # 'allauth.socialaccount.providers.weibo',",
" 'allauth.socialaccount.providers.xing',",
" 'analytical',",
')')
def _configure_authentication():
global settings
_print_verbose(2, adding_settings_for('user profiles and authentication'))
settings.delete_var('common', 'SERVER_EMAIL')
settings.set_value_lines('common', 'ADMINS',
'(',
" ('Your Name', '[email protected]'),",
')',
'SERVER_EMAIL = ADMINS[0][1]',
'DEFAULT_FROM_EMAIL = SERVER_EMAIL')
settings.append_lines('common',
'AUTHENTICATION_BACKENDS = (',
" 'django.contrib.auth.backends.ModelBackend',",
" 'allauth.account.auth_backends.AuthenticationBackend',",
')')
settings.set_value('common', 'ACCOUNT_AUTHENTICATION_METHOD', "'email'")
settings.set_value('common', 'ACCOUNT_EMAIL_REQUIRED', True)
settings.set_value('common', 'ACCOUNT_USERNAME_REQUIRED', False)
settings.set_value('common', 'LOGIN_REDIRECT_URL', "'/'")
settings.set_value('common', 'LOGIN_URL', "'/login'")
settings.set_value('common', 'LOGOUT_URL', "'/logout'")
settings.set_value('develop', 'EMAIL_BACKEND', "'django.core.mail.backends.console.EmailBackend'")
def _configure_templates():
global projectname
global settings
_print_verbose(2, adding_settings_for('Django templates'))
settings.delete_from_list('common',
["TEMPLATES = [", "{"],
"'APP_DIRS': True")
settings.append_to_list('common',
["TEMPLATES = [", "{", "'DIRS': ["],
"join(BASE_DIR, '%s.templates')" % projectname,
"join(BASE_DIR, '%s.templates', 'zinnia')" % projectname)
settings.append_to_list('common',
["TEMPLATES = [", "{", "'OPTIONS': {", "'context_processors': ["],
"'django.template.context_processors.i18n'",
"'django.template.context_processors.media'",
"'django.template.context_processors.static'",
"'sekizai.context_processors.sekizai'",
"'cms.context_processors.cms_settings'",
"'organice.context_processors.expose'")
settings.append_to_list('common',
["TEMPLATES = [", "{", "'OPTIONS': {"],
"'loaders': []")
settings.append_to_list('common',
["TEMPLATES = [", "{", "'OPTIONS': {", "'loaders': ["],
"'apptemplates.Loader'",
"'django.template.loaders.filesystem.Loader'",
"'django.template.loaders.app_directories.Loader'")
settings.append_to_list('common',
["TEMPLATES = [", "{", "'OPTIONS': {"],
"'debug': True",
"# 'string_if_invalid': '|INVALID) %s (INVALID|'",
"# see https://docs.djangoproject.com/en/1.8/ref/settings/#template-string-if-invalid ")
def _configure_cms():
global projectname
global settings
_print_verbose(2, adding_settings_for('django CMS'))
settings.append_to_list('common',
['MIDDLEWARE_CLASSES = ('],
"'django.middleware.locale.LocaleMiddleware'",
"'solid_i18n.middleware.SolidLocaleMiddleware'",
"'cms.middleware.page.CurrentPageMiddleware'",
"'cms.middleware.user.CurrentUserMiddleware'",
"'cms.middleware.toolbar.ToolbarMiddleware'",
"'cms.middleware.language.LanguageCookieMiddleware'")
# must be set both in order to make solid_i18n work properly
settings.set_value_lines('common', 'LANGUAGE_CODE', "'en'",
'LANGUAGES = (',
" ('en', _('English')),",
" ('de', _('German')),",
" ('it', _('Italian')),",
')')
settings.set_value('common', 'CMS_USE_TINYMCE', False)
settings.append_lines('common',
'CMS_TEMPLATES = (',
" ('cms_base.html', 'Template for normal content pages'),",
" ('cms_bookmarks.html', 'Template for the bookmarks page'),",
')')
settings.append_lines('common',
'MEDIA_TREE_MEDIA_BACKENDS = (',
" 'media_tree.contrib.media_backends.easy_thumbnails.EasyThumbnailsBackend',",
')')
settings.append_lines('common',
'MIGRATION_MODULES = {',
" 'djangocms_file': 'djangocms_file.migrations_django',",
" 'djangocms_flash': 'djangocms_flash.migrations_django',",
" 'djangocms_googlemap': 'djangocms_googlemap.migrations_django',",
" 'djangocms_inherit': 'djangocms_inherit.migrations_django',",
" 'djangocms_link': 'djangocms_link.migrations_django',",
" 'djangocms_picture': 'djangocms_picture.migrations_django',",
" 'djangocms_teaser': 'djangocms_teaser.migrations_django',",
" 'djangocms_video': 'djangocms_video.migrations_django',",
" 'zinnia': 'organice.migrations.zinnia',",
'}')
def _configure_newsletter():
global settings
_print_verbose(2, adding_settings_for('Emencia Newsletter'))
settings.append_lines('common',
"NEWSLETTER_DEFAULT_HEADER_SENDER = 'Your Organization <[email protected]>'",
'NEWSLETTER_USE_TINYMCE = True',
'NEWSLETTER_TEMPLATES = [',
" {",
" 'title': 'Sample template for newsletter',",
" 'src': '/media/newsletter/templates/sample-template.html',",
" 'description': 'Newsletter template tabular sample',",
" },",
']',
'TINYMCE_DEFAULT_CONFIG = {',
" 'height': 450,",
" 'width': 800,",
" 'convert_urls': False,",
" 'plugins': 'table,paste,searchreplace,template,advlist,autolink,autosave',",
" 'template_templates': NEWSLETTER_TEMPLATES,",
" 'theme': 'advanced',",
" 'theme_advanced_toolbar_location': 'top',",
" 'theme_advanced_buttons1':",
" 'template,|,formatselect,'",
" '|,bold,italic,underline,strikethrough,|,undo,redo,'",
" '|,justifyleft,justifycenter,justifyright,justifyfull,'",
" '|,bullist,numlist,dt,dd,|,outdent,indent,|,blockquote',",
" 'theme_advanced_buttons2':",
" 'tablecontrols,|,forecolor,backcolor,'",
" '|,hr,image,anchor,link,unlink,|,visualaid,code',",
" 'theme_advanced_resizing': True,",
'}')
def _configure_blog():
global settings
_print_verbose(2, adding_settings_for('Zinnia Blog'))
settings.append_lines('common',
'# use plugin system of django-cms in blog entries',
"ZINNIA_ENTRY_BASE_MODEL = 'cmsplugin_zinnia.placeholder.EntryPlaceholder'",
"ZINNIA_WYSIWYG = 'wymeditor'")
def _configure_set_custom():
"""
Set variable values specified via any ``--set`` command line options.
"""
global args
global settings
if args.set:
_print_verbose(2, adding_settings_for(", ".join([v[1] for v in args.set])))
for dest, var, value in args.set:
settings.set_value(dest, var, value)
def _generate_urls_conf():
global projectname
_print_verbose(2, 'Configuring project URLs ...')
gen_by_comment = '# generated by django Organice'
project = DjangoModuleManager(projectname)
project.add_file('urls', lines=(gen_by_comment, 'from organice.urls import urlpatterns # noqa'))
project.save_files()
def _generate_webserver_conf():
global args
global profiles
global projectname
if args.webserver == 'apache':
settings.move_var('common', profiles, 'WSGI_APPLICATION')
else:
_print_verbose(2, 'Generating lighttp web server configuration ...')
os.unlink(os.path.join(projectname, 'wsgi.py'))
settings.delete_var('common', 'WSGI_APPLICATION')
settings.append_lines('common',
'# Override the server-derived value of SCRIPT_NAME',
'# See http://code.djangoproject.com/wiki/'
+ 'BackwardsIncompatibleChanges#lighttpdfastcgiandothers',
"FORCE_SCRIPT_NAME = ''")
settings.move_var('common', profiles, 'FORCE_SCRIPT_NAME')
conf_template = django.template.Template(r"""# Lighttp web server configuration
# {{ account }}.organice.io
$HTTP["host"] =~ "^({{ account }}.organice.io|{{ custom_domain }})$" {
fastcgi.server = (
"/django.fcgi" => (
"main" => (
"socket" => env.HOME + "/{{ organice }}/{{ projectname }}.sock",
"check-local" => "disable",
)
),
)
alias.url = (
"/media/" => env.HOME + "/{{ organice }}/{{ projectname }}.media/",
"/static/" => env.HOME + "/{{ organice }}/{{ projectname }}.static/",
)
url.rewrite-once = (
"^(/media/.*)$" => "/$1",
"^(/static/.*)$" => "/$1",
"^/favicon\.ico$" => "/media/favicon.ico",
"^(/.*)$" => "/django.fcgi$1",
)
# enforce optional custom domain name
{{ ignore }}$HTTP["host"] != "{{ custom_domain }}" {
{{ ignore }} url.redirect = ("^/django.fcgi(.*)$" => "http://{{ custom_domain }}$1")
{{ ignore }}}
}
""")
conf_context = django.template.Context({
'organice': 'organice',
'projectname': projectname,
'account': args.account if args.account else projectname,
'custom_domain': args.domain if args.domain else 'www.example.com',
'ignore': '' if args.domain else '#',
})
conf_file = open('%s.conf' % projectname, 'w')
conf_file.write(conf_template.render(conf_context))
conf_file.close()
def _show_final_hints():
global projectname
global settings
suggest_editing = ('ADMINS', 'TIME_ZONE', 'LANGUAGE_CODE', 'LANGUAGES', 'EMAIL_BACKEND', 'SERVER_EMAIL')
suggest_adding = ()
_print_verbose(1, 'Done. Enjoy your organiced day!')
_print_verbose(2, '')
_print_verbose(2, 'Please visit file `%s` and edit or add the variables: %s' %
(settings.get_file('common').name, ", ".join(suggest_editing + suggest_adding)))
_print_verbose(2, 'Please visit file `%s` and configure your development database in: %s' %
(settings.get_file('develop').name, 'DATABASES'))
_print_verbose(3, 'See https://docs.djangoproject.com/en/1.8/ref/settings/ for details.')
_print_verbose(3, '')
_print_verbose(3, '1) To initialize your development database run: `python manage.py migrate`')
_print_verbose(3, '2) Create a super user for administration, and optionally load some sample data:')
_print_verbose(3, ' `python manage.py createsuperuser '
'&& python manage.py loaddata organice_auth_providers organice_sample_content`')
_print_verbose(3, '3) You can then run your development server with: `python manage.py runserver`')
_print_verbose(3, '4) To prepare your production server you may run: '
'`python manage.py collectstatic --link --settings=%s.settings.production`' % projectname)
def startproject():
"""
Starts a new django Organice project by first generating a Django project
using ``django-admin.py``, and then modifying the project settings.
"""
global settings
_evaluate_command_line()
django.conf.settings.configure() # for django.template init only
_create_project()
_split_project()
_configure_database()
_configure_installed_apps()
_configure_authentication()
_configure_templates()
_configure_cms()
_configure_newsletter()
_configure_blog()
_configure_set_custom()
_generate_urls_conf()
_generate_webserver_conf()
settings.save_files()
_show_final_hints()
if __name__ == "__main__":
startproject()
| [
"[email protected]"
] | |
543aa44ea9776960042b1a781a575c8e48fc9ae6 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stdlib/mmap.pyi | 8dbec2388838751a229a711f169e2eb9208a238a | [
"Apache-2.0",
"MIT"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 3,766 | pyi | import sys
from _typeshed import ReadableBuffer, Self
from collections.abc import Iterable, Iterator, Sized
from typing import NoReturn, overload
ACCESS_DEFAULT: int
ACCESS_READ: int
ACCESS_WRITE: int
ACCESS_COPY: int
ALLOCATIONGRANULARITY: int
if sys.platform == "linux":
MAP_DENYWRITE: int
MAP_EXECUTABLE: int
if sys.version_info >= (3, 10):
MAP_POPULATE: int
if sys.platform != "win32":
MAP_ANON: int
MAP_ANONYMOUS: int
MAP_PRIVATE: int
MAP_SHARED: int
PROT_EXEC: int
PROT_READ: int
PROT_WRITE: int
PAGESIZE: int
class mmap(Iterable[int], Sized):
if sys.platform == "win32":
def __init__(self, fileno: int, length: int, tagname: str | None = ..., access: int = ..., offset: int = ...) -> None: ...
else:
def __init__(
self, fileno: int, length: int, flags: int = ..., prot: int = ..., access: int = ..., offset: int = ...
) -> None: ...
def close(self) -> None: ...
if sys.version_info >= (3, 8):
def flush(self, offset: int = ..., size: int = ...) -> None: ...
else:
def flush(self, offset: int = ..., size: int = ...) -> int: ...
def move(self, dest: int, src: int, count: int) -> None: ...
def read_byte(self) -> int: ...
def readline(self) -> bytes: ...
def resize(self, newsize: int) -> None: ...
def seek(self, pos: int, whence: int = ...) -> None: ...
def size(self) -> int: ...
def tell(self) -> int: ...
def write_byte(self, byte: int) -> None: ...
def __len__(self) -> int: ...
closed: bool
if sys.version_info >= (3, 8) and sys.platform != "win32":
def madvise(self, option: int, start: int = ..., length: int = ...) -> None: ...
def find(self, sub: ReadableBuffer, start: int = ..., stop: int = ...) -> int: ...
def rfind(self, sub: ReadableBuffer, start: int = ..., stop: int = ...) -> int: ...
def read(self, n: int | None = ...) -> bytes: ...
def write(self, bytes: ReadableBuffer) -> int: ...
@overload
def __getitem__(self, __index: int) -> int: ...
@overload
def __getitem__(self, __index: slice) -> bytes: ...
def __delitem__(self, __index: int | slice) -> NoReturn: ...
@overload
def __setitem__(self, __index: int, __object: int) -> None: ...
@overload
def __setitem__(self, __index: slice, __object: ReadableBuffer) -> None: ...
# Doesn't actually exist, but the object is actually iterable because it has __getitem__ and
# __len__, so we claim that there is also an __iter__ to help type checkers.
def __iter__(self) -> Iterator[int]: ...
def __enter__(self: Self) -> Self: ...
def __exit__(self, *args: object) -> None: ...
if sys.version_info >= (3, 8) and sys.platform != "win32":
MADV_NORMAL: int
MADV_RANDOM: int
MADV_SEQUENTIAL: int
MADV_WILLNEED: int
MADV_DONTNEED: int
MADV_FREE: int
if sys.platform == "linux":
MADV_REMOVE: int
MADV_DONTFORK: int
MADV_DOFORK: int
MADV_HWPOISON: int
MADV_MERGEABLE: int
MADV_UNMERGEABLE: int
# Seems like this constant is not defined in glibc.
# See https://github.com/python/typeshed/pull/5360 for details
# MADV_SOFT_OFFLINE: int
MADV_HUGEPAGE: int
MADV_NOHUGEPAGE: int
MADV_DONTDUMP: int
MADV_DODUMP: int
# This Values are defined for FreeBSD but type checkers do not support conditions for these
if sys.platform != "linux" and sys.platform != "darwin":
MADV_NOSYNC: int
MADV_AUTOSYNC: int
MADV_NOCORE: int
MADV_CORE: int
MADV_PROTECT: int
if sys.version_info >= (3, 10) and sys.platform == "darwin":
MADV_FREE_REUSABLE: int
MADV_FREE_REUSE: int
| [
"[email protected]"
] | |
ba47a3b2b1b54854c914b8d55705e91c8cecca60 | 1a12cc54ac1b2934cddf12eb733d5a8c25a0bf6d | /interviewbit-trees-sum-root-to-leaf-numbers.py | f36de034600560e22fc2107d9968d6e6512945bb | [] | no_license | sbairishal/CodePath-Alumni-Professional-Interview-Prep-Course | 5813b95cb4c23551d06e74a3aaec6ae7815923ae | 71d8f1e7a456a0f97655e7be90fa17fe18ceaf95 | refs/heads/master | 2020-10-01T01:53:29.246652 | 2018-02-21T06:05:57 | 2018-02-21T06:05:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
# @param A : root node of tree
# @return an integer
def do_the_summing(self, node):
if not node.left and not node.right:
return node.val
else:
if node.left:
node.left.val += node.val * 10
if node.right:
node.right.val += node.val * 10
return (self.do_the_summing(node.left) if node.left else 0) + (self.do_the_summing(node.right) if node.right else 0)
def sumNumbers(self, A):
return self.do_the_summing(A) % 1003
| [
"[email protected]"
] | |
9a8428b2135de28dbcd0c266e17fd55c71c38ea9 | 1e67e211123f694bd807e1efb2a85a8cbdae2882 | /server/printer/urls.py | d55cad5aa7bfa09f168e28197e0ebb4d088820ef | [
"MIT"
] | permissive | coll-gate/collgate | 7590ec8dbc7cdb310d0c8452fd6c6e76cf02985d | 8c2ff1c59adda2bf318040f588c05263317a2812 | refs/heads/master | 2021-01-20T03:00:35.617958 | 2019-03-01T16:46:49 | 2019-03-01T16:46:49 | 89,474,611 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 317 | py | # -*- coding: utf-8; -*-
#
# @file urls.py
# @brief collgate
# @author Frédéric SCHERMA (INRA UMR1095)
# @date 2018-09-20
# @copyright Copyright (c) 2018 INRA/CIRAD
# @license MIT (see LICENSE file)
# @details coll-gate printer module url entry point.
from django.conf.urls import include, url
urlpatterns = [
]
| [
"[email protected]"
] | |
04220f42f761ca699b4f3c16a17d68f44f6de187 | 547e8b46c55fc8e59a2fa95dc1121960badfd7b2 | /ansible/deploy_docker.py | b4a08e7f6b886e08e160d828ffbef0b4dcd9db88 | [
"Apache-2.0"
] | permissive | cammlab/digital_slide_archive | 81bdd234f58321a2f23dff2123d1e8dcbca199bb | 290c1b989d897acc19d6395094dcfe53f670a923 | refs/heads/master | 2022-10-11T13:22:41.813129 | 2020-06-07T15:39:59 | 2020-06-07T15:41:28 | 262,087,325 | 0 | 1 | NOASSERTION | 2020-05-07T15:24:48 | 2020-05-07T15:24:47 | null | UTF-8 | Python | false | false | 40,134 | py | #!/usr/bin/env python
import argparse
import collections
import docker
import getpass
import gzip
import json
import os
import six
import socket
import sys
import tarfile
import time
import uuid
from distutils.version import LooseVersion
if not (LooseVersion('1.9') <= LooseVersion(docker.version)):
raise Exception('docker or docker-py must be >= version 1.9')
BaseName = 'dsa'
ImageList = collections.OrderedDict([
('rabbitmq', {
'tag': 'rabbitmq:management',
'name': BaseName + '_rabbitmq',
'oldnames': ['histomicstk_rmq'],
'pull': True,
}),
('mongodb', {
'tag': 'mongo:latest',
'name': BaseName + '_mongodb',
'oldnames': ['histomicstk_mongodb'],
'pull': True,
}),
('memcached', {
'tag': 'memcached:latest',
'name': BaseName + '_memcached',
'oldnames': ['histomicstk_memcached'],
'pull': True,
}),
('worker', {
'tag': 'dsarchive/dsa_worker',
'name': BaseName + '_worker',
'oldnames': ['histomicstk_girder_worker'],
'dockerfile': 'Dockerfile-worker',
'pinned': 'v1.0.0',
}),
('girder', {
'tag': 'dsarchive/dsa_girder',
'name': BaseName + '_girder',
'oldnames': ['histomicstk_histomicstk'],
'dockerfile': 'Dockerfile-girder',
'pinned': 'v1.0.0',
}),
('cli', {
'tag': 'dsarchive/histomicstk',
'pull': True,
'pinned': 'v0.1.7',
}),
])
def config_mounts(mounts, config):
"""
Add extra mounts to a docker configuration.
:param mounts: a list of mounts to add, or None.
:config: a config dictionary. Mounts are added to the binds entry.
"""
mountNumber = 1
if mounts is None:
mounts = []
for mount in mounts:
mountParts = mount.split(':')
if len(mountParts) < 2:
mountParts.append('')
if mountParts[1] == '':
mountParts[1] = 'mount%d' % mountNumber
mountNumber += 1
if '/' not in mountParts[1]:
mountParts[1] = '/opt/digital_slide_archive/mounts/%s' % mountParts[1]
config['binds'].append(':'.join(mountParts))
def containers_provision(**kwargs): # noqa
"""
Provision or reprovision the containers.
"""
client = docker_client()
ctn = get_docker_image_and_container(
client, 'girder', version=kwargs.get('pinned'))
if kwargs.get('conf'):
merge_configuration(client, ctn, **kwargs)
username = kwargs.get('username')
password = kwargs.get('password')
if username == '':
username = six.moves.input('Admin login: ')
if password == '':
password = getpass.getpass('Password for %s: ' % (
username if username else 'default admin user'))
# docker exec -i -t dsa_girder bash -c
# 'cd /home/ubuntu/digital_slide_archive/ansible && ansible-playbook -i
# inventory/local docker_ansible.yml --extra-vars=docker=provision'
extra_vars = {
'docker': 'provision'
}
if username:
extra_vars['girder_admin_user'] = username
extra_vars['girder_no_create_admin'] = True
if password:
extra_vars['girder_admin_password'] = password
extra_vars['girder_no_create_admin'] = True
if kwargs.get('worker_api_url'):
extra_vars['girder_api_url'] = kwargs['worker_api_url']
if kwargs.get('cli'):
extra_vars['cli_image'] = tag_with_version('cli', **kwargs)
if kwargs['cli'] == 'test':
extra_vars['cli_image_test'] = 'true'
wait_for_girder(client, ctn)
ansible_command = (
'ansible-playbook -i inventory/local docker_ansible.yml '
'--extra-vars=' + six.moves.shlex_quote(json.dumps(extra_vars)))
exec_command = 'bash -c ' + six.moves.shlex_quote(
'cd /home/ubuntu/digital_slide_archive/ansible && ' + ansible_command)
tries = 1
while True:
try:
cmd = client.exec_create(
container=ctn.get('Id'), cmd=exec_command, tty=True)
try:
for output in client.exec_start(cmd.get('Id'), stream=True):
print(convert_to_text(output).strip())
except socket.error:
pass
cmd = client.exec_inspect(cmd.get('Id'))
if not cmd['ExitCode']:
break
except (ValueError, docker.errors.APIError):
time.sleep(1)
print('Error provisioning (try %d)' % tries)
tries += 1
if not kwargs.get('retry'):
raise Exception('Failed to provision')
def containers_start(
port=8080, rmq='docker', mongo='docker', memcached='docker',
provision=False, **kwargs):
"""
Start all appropriate containers. This is, at least, worker and girder.
Optionally, mongodb and rabbitmq are included.
:param port: default port to expose.
:param rmq: 'docker' to use a docker for rabbitmq, 'host' to use the docker
host, otherwise the IP for the rabbitmq instance, where DOCKER_HOST
maps to the docker host and anything else is passed through.
:param mongo: 'docker' to use a docker for mongo, 'host' to use the docker
host, otherwise the IP for the mongo instance, where DOCKER_HOST maps
to the docker host and anything else is passed through. The database
is always 'girder'. Any other value is considered a docker version.
:param provision: if True, reprovision after starting. Otherwise, only
provision if the histomictk container is created.
"""
client = docker_client()
env = {
'HOST_UID': os.popen('id -u').read().strip(),
'HOST_GID': os.popen('id -g').read().strip(),
}
sockpath = '/var/run/docker.sock'
if os.path.exists(sockpath):
env['HOST_DOCKER_GID'] = str(os.stat(sockpath).st_gid)
else:
try:
env['HOST_DOCKER_GID'] = os.popen('getent group docker').read().split(':')[2]
except Exception:
pass
network_create(client, BaseName)
for key in ImageList:
func = 'container_start_' + key
if func in globals():
if globals()[func](
client, env, key, port=port, rmq=rmq, mongo=mongo,
memcached=memcached, provision=provision, **kwargs):
provision = True
if provision:
containers_provision(**kwargs)
def container_start_girder(
client, env, key='girder', port=8080, rmq='docker', mongo='docker',
memcached='docker', provision=False, **kwargs):
"""
Start a Girder container.
:param client: docker client.
:param env: dictionary to store environment variables.
:param key: key within the ImageList.
:param port: default port to expose.
:param rmq: 'docker' to use a docker for rabbitmq, 'host' to use the docker
host, otherwise the IP for the rabbitmq instance, where DOCKER_HOST
maps to the docker host and anything else is passed through.
:param mongo: 'docker' to use a docker for mongo, 'host' to use the docker
host, otherwise the IP for the mongo instance, where DOCKER_HOST maps
to the docker host and anything else is passed through. The database
is always 'girder'. Any other value is considered a docker version.
:param provision: if True, reprovision after starting. Otherwise, only
provision if the histomictk container is created.
:returns: True if the container should be provisioned.
"""
image = tag_with_version(key, **kwargs)
name = ImageList[key]['name']
ctn = get_docker_image_and_container(
client, key, version=kwargs.get('pinned'))
if ctn is None:
provision = True
config = {
'restart_policy': {'name': 'always'},
'privileged': True, # so we can run docker
'links': {},
'port_bindings': {8080: int(port)},
'binds': [
get_path(kwargs['logs']) + ':/opt/logs:rw',
get_path(kwargs['logs']) + ':/opt/digital_slide_archive/logs:rw',
get_path(kwargs['assetstore']) + ':/opt/digital_slide_archive/assetstore:rw',
],
}
config['binds'].extend(docker_mounts())
config_mounts(kwargs.get('mount'), config)
if rmq == 'docker' and 'rabbitmq' in ImageList:
config['links'][ImageList['rabbitmq']['name']] = 'rabbitmq'
if memcached == 'docker' and 'memcached' in ImageList:
config['links'][ImageList['memcached']['name']] = 'memcached'
if mongo != 'host' and 'mongodb' in ImageList:
config['links'][ImageList['mongodb']['name']] = 'mongodb'
params = {
'image': image,
'detach': True,
'hostname': key,
'name': name,
'environment': env.copy(),
'ports': [8080],
}
print('Creating %s - %s' % (image, name))
ctn = client.create_container(
host_config=client.create_host_config(**config),
networking_config=client.create_networking_config({
BaseName: client.create_endpoint_config(aliases=[key])
}),
**params)
if ctn.get('State') != 'running':
print('Starting %s - %s' % (image, name))
client.start(container=ctn.get('Id'))
return provision
def container_start_memcached(client, env, key='memcached', memcached='docker', **kwargs):
"""
Start a memcached container if desired, or set an environment variable so
other containers know where to find it.
:param client: docker client.
:param env: dictionary to store environment variables.
:param key: key within the ImageList.
:param memcached: 'docker' to use a docker for memcached, 'host' to use the
docker host, otherwise the IP for the memcached instance, where
DOCKER_HOST maps to the docker host and anything else is passed
through.
"""
if memcached == 'host':
env['HOST_MEMCACHED'] = 'true'
elif memcached == 'docker':
image = tag_with_version(key, **kwargs)
name = ImageList[key]['name']
ctn = get_docker_image_and_container(
client, key, version=kwargs.get('pinned'))
if ctn is None:
config = {
'restart_policy': {'name': 'always'},
}
params = {
'image': image,
'command': ['memcached', '-m', str(kwargs.get('cache', 1024))],
'detach': True,
'hostname': key,
'name': name,
}
print('Creating %s - %s' % (image, name))
ctn = client.create_container(
host_config=client.create_host_config(**config),
networking_config=client.create_networking_config({
BaseName: client.create_endpoint_config(aliases=[key])
}),
**params)
if ctn.get('State') != 'running':
print('Starting %s - %s' % (image, name))
client.start(container=ctn.get('Id'))
else:
env['HOST_MEMCACHED'] = 'true' if memcached == 'host' else memcached
def container_start_mongodb(client, env, key='mongodb', mongo='docker',
mongodb_path='docker', **kwargs):
"""
Start a mongo container if desired, or set an environment variable so other
containers know where to find it.
:param client: docker client.
:param env: dictionary to store environment variables.
:param key: key within the ImageList.
:param mongo: 'docker' to use a docker for mongo, 'host' to use the docker
host, otherwise the IP for the mongo instance, where DOCKER_HOST maps
to the docker host and anything else is passed through. The database
is always 'girder'. Any other value is considered a docker version.
:param mongodb_path: the path to use for mongo when run in docker. If
'docker', use an internal data directory.
"""
if mongo == 'host':
env['HOST_MONGO'] = 'true'
# If we generate the girder worker config file on the fly, update this
# to something like:
# env['HOST_MONGO'] = mongo if mongo != 'host' else 'DOCKER_HOST'
else:
version = None if mongo == 'docker' else mongo
image = tag_with_version(key, version=version, **kwargs)
name = ImageList[key]['name']
ctn = get_docker_image_and_container(
client, key, version=version if version else kwargs.get('pinned'))
if ctn is None:
config = {
'restart_policy': {'name': 'always'},
'binds': [
# If we bind the log path, we also need to pass the logpath
# as part of the container command and run the container
# with the local log path's user ID so that mongo will
# write to that directory.
# get_path(kwargs['logs']) + ':/var/log/mongodb:rw',
]
}
params = {
'image': image,
'detach': True,
'hostname': key,
'name': name,
}
if mongodb_path != 'docker':
params['volumes'] = ['/data/db']
config['binds'].append(get_path(mongodb_path) + ':/data/db:rw')
print('Creating %s - %s' % (image, name))
ctn = client.create_container(
host_config=client.create_host_config(**config),
networking_config=client.create_networking_config({
BaseName: client.create_endpoint_config(aliases=[key])
}),
**params)
if ctn.get('State') != 'running':
print('Starting %s - %s' % (image, name))
client.start(container=ctn.get('Id'))
def container_start_rabbitmq(
client, env, key='rabbitmq', rmq='docker', rmqport=None, **kwargs):
"""
Start a rabbitmq container if desired, or set an environment variable so
other containers know where to find it.
:param client: docker client.
:param env: dictionary to store environment variables.
:param key: key within the ImageList.
:param rmq: 'docker' to use a docker for rabbitmq, 'host' to use the docker
host, otherwise the IP for the rabbitmq instance, where DOCKER_HOST
maps to the docker host and anything else is passed through.
:param rmqport: if specified, docker RMQ port to expose
"""
if rmq == 'docker':
image = tag_with_version(key, **kwargs)
name = ImageList[key]['name']
ctn = get_docker_image_and_container(
client, key, version=kwargs.get('pinned'))
if ctn is None:
config = {
'restart_policy': {'name': 'always'},
}
params = {
'image': image,
'detach': True,
'hostname': key,
'name': name,
# 'ports': [15672], # for management access
}
if rmqport:
params['ports'] = [5672]
config['port_bindings'] = {5672: int(rmqport)}
print('Creating %s - %s' % (image, name))
ctn = client.create_container(
host_config=client.create_host_config(**config),
networking_config=client.create_networking_config({
BaseName: client.create_endpoint_config(aliases=[key])
}),
**params)
if ctn.get('State') != 'running':
print('Starting %s - %s' % (image, name))
client.start(container=ctn.get('Id'))
else:
env['HOST_RMQ'] = 'true' if rmq == 'host' else rmq
# If we generate the girder worker config file on the fly, update this
# to something like:
# env['HOST_RMQ'] = rmq if rmq != 'host' else 'DOCKER_HOST'
def container_start_worker(client, env, key='worker', rmq='docker', **kwargs):
"""
Start a girder_worker container.
:param client: docker client.
:param env: dictionary to store environment variables.
:param key: key within the ImageList.
:param rmq: 'docker' to use a docker for rabbitmq, 'host' to use the docker
host, otherwise the IP for the rabbitmq instance, where DOCKER_HOST
maps to the docker host and anything else is passed through.
"""
image = tag_with_version(key, **kwargs)
name = ImageList[key]['name']
ctn = get_docker_image_and_container(
client, key, version=kwargs.get('pinned'))
if ctn is None:
worker_tmp_root = (
kwargs['worker_tmp_root'] if kwargs['worker_tmp_root'] else '/tmp/girder_worker')
config = {
'restart_policy': {'name': 'always'},
'privileged': True, # so we can run docker
'links': {},
'binds': [
get_path(kwargs['logs']) + ':/opt/logs:rw',
'%s:%s' % (worker_tmp_root, worker_tmp_root),
get_path(kwargs['assetstore']) + ':/opt/digital_slide_archive/assetstore:rw',
],
}
config['binds'].extend(docker_mounts())
config_mounts(kwargs.get('mount'), config)
if rmq == 'docker':
config['links'][ImageList['rabbitmq']['name']] = 'rabbitmq'
else:
env['HOST_RMQ'] = 'true' if rmq == 'host' else rmq
env['GIRDER_WORKER_TMP_ROOT'] = worker_tmp_root
if 'concurrency' in kwargs:
env['GIRDER_WORKER_CONCURRENCY'] = kwargs['concurrency']
params = {
'image': image,
'detach': True,
'hostname': '%s_%s' % (key, str(uuid.uuid1(uuid.getnode(), 0))[24:]),
'name': name,
'environment': env.copy(),
}
print('Creating %s - %s' % (image, name))
ctn = client.create_container(
host_config=client.create_host_config(**config),
networking_config=client.create_networking_config({
BaseName: client.create_endpoint_config(aliases=[key])
}),
**params)
if ctn.get('State') != 'running':
print('Starting %s - %s' % (image, name))
client.start(container=ctn.get('Id'))
def containers_status(**kwargs):
"""
Report the status of any containers we are responsible for.
"""
client = docker_client()
keys = ImageList.keys()
results = []
for key in keys:
if 'name' not in ImageList:
continue
ctn = get_docker_image_and_container(client, key, False)
entry = {
'key': key,
'name': ImageList[key]['name'],
'state': 'not created',
}
if ctn:
entry['state'] = ctn.get('State', entry['state'])
entry['status'] = ctn.get('Status')
results.append(entry)
print_table(results, collections.OrderedDict([
('name', 'Name'),
('state', 'State'),
('status', 'Status')]))
def containers_stop(remove=False, **kwargs):
"""
Stop and optionally remove any containers we are responsible for.
:param remove: True to remove the containers. False to just stop them.
"""
client = docker_client()
keys = list(ImageList.keys())
keys.reverse()
for key in keys:
ctn = get_docker_image_and_container(client, key, False)
if ctn:
if ctn.get('State') != 'exited':
print('Stopping %s' % (key))
client.stop(container=ctn.get('Id'))
if remove:
print('Removing %s' % (key))
client.remove_container(container=ctn.get('Id'))
if remove:
network_remove(client, BaseName)
def convert_to_text(value):
"""
Make sure a value is a text type in a Python version generic manner.
:param value: a value that is either a text or binary string.
:returns value: a text value.
"""
if isinstance(value, six.binary_type):
value = value.decode('utf8')
if not isinstance(value, six.text_type):
value = str(value)
return value
def docker_client():
"""
Return the current docker client in a manner that works with both the
docker-py and docker modules.
"""
try:
client = docker.from_env(version='auto', timeout=3600)
except TypeError:
# On older versions of docker-py (such as 1.9), version isn't a
# parameter, so try without it
client = docker.from_env()
client = client if not hasattr(client, 'api') else client.api
return client
def docker_mounts():
"""
Return a list of mounts needed to work with the host's docker.
:return: a list of volumes need to work with girder.
"""
docker_executable = '/usr/bin/docker'
if not os.path.exists(docker_executable):
import shutil
if not six.PY3:
import shutilwhich # noqa
docker_executable = shutil.which('docker')
mounts = [
docker_executable + ':/usr/bin/docker',
'/var/run/docker.sock:/var/run/docker.sock',
]
return mounts
def get_docker_image_and_container(client, key, pullOrBuild=True, version=None):
"""
Given a key from the docker ImageList, check if an image is present. If
not, pull it. Check if an associated container exists and return
information on it if so.
:param client: docker client.
:param key: key in the ImageList.
:param pullOrBuild: if True, try to pull or build the image if it isn't
present. If 'pull', try to pull the image (not build), even if we
already have it.
:param version: if True, use the pinned version when pulling. If a string,
use that version. Otherwise, don't specify a version (which defaults
to latest).
:returns: docker container or None.
"""
if pullOrBuild:
pull = False
image = tag_with_version(key, version)
try:
client.inspect_image(image)
except docker.errors.NotFound:
pull = True
if pull or pullOrBuild == 'pull':
print('Pulling %s' % image)
try:
client.pull(image)
except Exception:
if pullOrBuild == 'pull':
raise
if not ImageList[key].get('pull'):
images_build(True, key)
if ImageList[key].get('name'):
containers = client.containers(all=True)
names = [ImageList[key].get('name')] + ImageList[key].get('oldnames', [])
for name in names:
ctn = [entry for entry in containers if name in
[val.strip('/') for val in entry.get('Names', [])]]
if len(ctn):
return ctn[0]
return None
def get_path(path):
"""
Resolve a path to its realpath, creating a directory there if it doesn't
exist.
:param path: path to resolve and possibly create.
:return: the resolved path.
"""
path = os.path.realpath(os.path.expanduser(path))
if not os.path.exists(path):
os.makedirs(path)
return path
def images_build(retry=False, names=None):
r"""
Build necessary docker images from our dockerfiles.
This is equivalent to running:
docker build --force-rm --tag dsarchive/dsa_worker -f Dockerfile-worker .
docker build --force-rm --tag dsarchive/dsa_girder -f Dockerfile-girder .
:param retry: True to retry until success
:param names: None to build all, otherwise a string or a list of strings of
names to build.
"""
basepath = os.path.dirname(os.path.realpath(__file__))
client = docker_client()
if names is None:
names = ImageList.keys()
elif isinstance(names, six.string_types):
names = [names]
for name in ImageList:
if not ImageList[name].get('dockerfile') or name not in names:
continue
tries = 1
while True:
errored = False
print('Building %s%s' % (
name, '(try %d)' % tries if tries > 1 else ''))
buildStatus = client.build(
path=basepath,
tag=ImageList[name]['tag'],
rm=True,
pull=True,
forcerm=True,
dockerfile=ImageList[name]['dockerfile'],
decode=True,
)
for status in buildStatus:
statusLine = status.get('status', status.get('stream', '')).strip()
try:
print(statusLine)
except Exception:
print(repr(statusLine))
if 'errorDetail' in status:
if not retry:
sys.exit(1)
errored = True
break
if not errored:
break
print('Error building %s\n' % name)
tries += 1
print('Done building %s\n' % name)
def images_repull(**kwargs):
"""
Repull all docker images.
"""
client = docker_client()
for key, image in six.iteritems(ImageList):
if 'name' not in image and not kwargs.get('cli'):
continue
get_docker_image_and_container(
client, key, 'pull', version=kwargs.get('pinned'))
def merge_configuration(client, ctn, conf, **kwargs):
"""
Merge a Girder configuration file with the one in a running container.
:param client: the docker client.
:param ctn: a running docker container that contains
/home/ubuntu/.girder/girder.cfg
:param conf: a path to a configuration file fragment to merge with the
extant file.
"""
if not os.path.exists(conf):
print('Failed to read config file %r' % conf)
return
cfgPath = '/home/ubuntu/.girder'
cfgName = 'girder.cfg'
tarStream, stat = client.get_archive(ctn, cfgPath + '/' + cfgName)
if hasattr(tarStream, 'read'):
tarData = tarStream.read()
else:
tarData = b''.join([part for part in tarStream])
# Check if this is actually gzipped and uncompress it
if tarData[:2] == b'\x1f\x8b':
tarData = gzip.GzipFile(fileobj=six.BytesIO(tarData)).read()
tarStream = six.BytesIO(tarData)
tar = tarfile.TarFile(mode='r', fileobj=tarStream)
parser = six.moves.configparser.SafeConfigParser()
cfgFile = six.StringIO(convert_to_text(tar.extractfile(cfgName).read()))
parser.readfp(cfgFile)
parser.read(conf)
output = six.StringIO()
parser.write(output)
output = output.getvalue()
if kwargs.get('verbose') >= 1:
print(output)
if isinstance(output, six.text_type):
output = output.encode('utf8')
output = six.BytesIO(output)
output.seek(0, os.SEEK_END)
outputlen = output.tell()
output.seek(0)
tarOutput = six.BytesIO()
tar = tarfile.TarFile(fileobj=tarOutput, mode='w')
tarinfo = tarfile.TarInfo(name=cfgName)
tarinfo.size = outputlen
tarinfo.mtime = time.time()
tar.addfile(tarinfo, output)
tar.close()
tarOutput.seek(0)
client.put_archive(ctn, cfgPath, data=tarOutput)
def network_create(client, name):
"""
Ensure a network exists with a specified name.
:param client: docker client.
:param name: name of the network.
"""
networks = client.networks()
net = [entry for entry in networks if name == entry.get('Name')]
if len(net):
return
client.create_network(name)
def network_remove(client, name):
"""
Ensure a network with a specified name is removed.
:param client: docker client.
:param name: name of the network.
"""
networks = client.networks()
net = [entry for entry in networks if name == entry.get('Name')]
if not len(net):
return
client.remove_network(net[0].get('Id'))
def pinned_versions():
"""
Get a list of images that have pinned versions.
:return: a list of image names with versions.
"""
pinned = []
for image in six.itervalues(ImageList):
if 'pinned' in image:
pinned.append('%s:%s' % (image['tag'], image['pinned']))
return pinned
def print_table(table, headers):
"""
Format and print a table.
:param table: a list of dictionaries to display.
:param headers: an order dictionary of keys to display with the values
being the column headers.
"""
widths = {}
for key in headers:
widths[key] = len(str(headers[key]))
for row in table:
if key in row:
widths[key] = max(widths[key], len(str(row[key])))
format = ' '.join(['%%-%ds' % widths[key] for key in headers])
print(format % tuple([headers[key] for key in headers]))
for row in table:
print(format % tuple([row.get(key, '') for key in headers]))
def show_info():
"""
Print additional installation notes.
"""
print("""
Running containers can be joined using a command like
docker exec -i -t dsa_girder bash
To allow docker containers to use memcached, make sure the host is running
memcached and it is listening on the docker IP address (or listening on all
addresses via -l 0.0.0.0).
To determine the current mongo docker version, use a command like
docker exec dsa_mongodb mongo girder --eval 'db.version()'
To check if mongo can be upgraded, query the compatability mode via
docker exec dsa_mongodb mongo girder --eval \\
'db.adminCommand({getParameter: 1, featureCompatibilityVersion: 1})'
Mongo can only be upgraded if the compatibility version is the same as the
semi-major version. Before upgrading, set the compatibility mode. For
instance, if Mongo 3.6.1 is running,
docker exec dsa_mongodb mongo girder --eval \\
'db.adminCommand({setFeatureCompatibilityVersion: "3.6"})'
after which Mongo can be upgraded to version 4.0. After upgrading, set the
compatibility mode to the new version.
""")
def tag_with_version(key, version=None, **kwargs):
"""
Get an image tag with a version appended to it. If the pinned parameter is
specified, use the specified or pinned version.
:param key: the key in the image list.
:param version: the version to use, True to use the pinned value, or None
to use latest. If None, use kwargs.get('pinned') as the version.
:return: the image tag with a version.
"""
image = ImageList[key]['tag']
if version is None:
version = kwargs.get('pinned')
if version is True:
version = ImageList[key].get('pinned')
if isinstance(version, six.string_types):
image = image.split(':', 1)[0] + ':' + version
if ':' not in image:
image += ':latest'
return image
def wait_for_girder(client, ctn, maxWait=3600):
"""
Wait for Girder in a specific container to respond with its current
version.
:param client: docker client.
:param ctn: docker container with Girder.
:param maxWait: maximum time to wait for Girder to respond.
"""
starttime = time.time()
sys.stdout.write('Waiting for Girder to report version: ')
sys.stdout.flush()
# This really should be the girder_api_url from the current settings
girder_api_url = 'http://girder:8080/api/v1'
exec_command = 'bash -c ' + six.moves.shlex_quote(
'curl "%s/system/version"' % girder_api_url)
while time.time() - starttime < maxWait:
cmd = client.exec_create(
container=ctn.get('Id'), cmd=exec_command, tty=True)
output = client.exec_start(cmd.get('Id'), stream=False)
try:
output = json.loads(convert_to_text(output).strip())
if 'release' in output or 'apiVersion' in output:
break
except Exception:
pass
output = None
time.sleep(1)
sys.stdout.write('.')
sys.stdout.flush()
if not output:
raise Exception('Girder never responded')
sys.stdout.write(' %s\n' % output.get('release', output.get('apiVersion', None)))
sys.stdout.write('Took {} seconds\n'.format(time.time() - starttime))
if __name__ == '__main__': # noqa
parser = argparse.ArgumentParser(
description='Provision and run Digital Slide Archive in docker containers.')
parser.add_argument(
'command',
choices=['start', 'restart', 'stop', 'rm', 'remove', 'status',
'build', 'provision', 'info', 'pull'],
help='Start, stop, stop and remove, restart, check the status of, or '
'build our own docker containers')
parser.add_argument(
'--assetstore', '-a', default='~/.dsa/assetstore',
help='Assetstore path.')
parser.add_argument(
'--build', '-b', dest='build', action='store_true',
help='Build girder and worker docker images.')
parser.add_argument(
'--cache', type=int, default=1024,
help='memcached memory in Mbytes. Default 1024.')
parser.add_argument(
'--cli', '-c', dest='cli', action='store_true', default=True,
help='Pull and install the HistomicsTK cli docker image.')
parser.add_argument(
'--cli-test', dest='cli', action='store_const', const='test',
help='Pull and install the HistomicsTK cli docker image; test the CLI.')
parser.add_argument(
'--no-cli', dest='cli', action='store_false',
help='Do not pull or install the HistomicsTK cli docker image.')
parser.add_argument(
'--concurrency', '-j', type=int,
help='Girder worker concurrency.')
parser.add_argument(
'--conf', '--cfg', '--girder-cfg',
help='Merge a Girder configuration file with the default '
'configuration in the docker container during provisioning.')
parser.add_argument(
'--db', '-d', dest='mongodb_path', default='~/.dsa/db',
help='Database path (if a Mongo docker container is used). Use '
'"docker" for the default docker storage location.')
parser.add_argument(
'--image', action='append',
help='Override docker image information. The value is of the form '
'key:tag:dockerfile.')
parser.add_argument(
'--info', action='store_true',
help='Show installation and usage notes.')
parser.add_argument(
'--logs', '--log', '-l', default='~/.dsa/logs',
help='Logs path.')
parser.add_argument(
'--memcached', default='docker',
help='Either use memcached from docker or from host (docker, host, or '
'IP adress or hostname of host.')
parser.add_argument(
'--mongo', '-m', default='docker',
choices=['docker', 'host', '3.4', '3.6', '4.0', '4.2', '4.4', 'latest'],
help='Either use mongo from docker or from host. If a version is '
'specified, the docker with that version will be used.')
parser.add_argument(
'--mount', '--extra', '-e', action='append',
help='Extra volumes to mount. These are mounted internally at '
'/opt/digital_slide_archive/mounts/(name), and are specified in the '
'form (host path)[:(name)[:ro]]. If no name is specified, mountX is '
'used, starting at mount1.')
parser.add_argument(
'--only', '-o',
help='A comma separated list to only start specified containers. '
'Defaults to all containers (%s).' % (','.join([
key for key in ImageList.keys() if key != 'cli']))),
parser.add_argument(
'--password', '--pass', '--passwd', '--pw',
const='', default=None, nargs='?',
help='Override the Girder admin password used in provisioning. Set '
'to an empty string to be prompted for username and password.')
parser.add_argument(
'--pinned', dest='pinned', action='store_true', default=False,
help='When pulling images, use the pinned versions (%s).' % (
', '.join(pinned_versions())))
parser.add_argument(
'--latest', dest='pinned', action='store_false',
help='When pulling images, use the latest images.')
parser.add_argument(
'--provision', action='store_true',
help='Reprovision the Girder the docker containers are started.')
parser.add_argument(
'--port', '-p', type=int, default=8080,
help='Girder access port.')
parser.add_argument(
'--pull', action='store_true',
help='Repull docker images.')
parser.add_argument(
'--retry', '-r', action='store_true', default=True,
help='Retry builds and provisioning until they succeed')
parser.add_argument(
'--rmqport', type=int,
help='RabbitMQ access port (commonly 5672).')
parser.add_argument(
'--no-retry', '--once', '-1', dest='retry', action='store_false',
help='Do not retry builds and provisioning until they succeed')
parser.add_argument(
'--rmq', default='docker',
help='Either use rabbitmq from docker or from host (docker, host, or '
'IP adress or hostname of host.')
parser.add_argument(
'--status', '-s', action='store_true',
help='Report the status of relevant docker containers and images.')
parser.add_argument(
'--username', '--user', const='', default=None, nargs='?',
help='Override the Girder admin username used in provisioning. Set '
'to an empty string to be prompted for username and password.')
parser.add_argument(
'--worker-api-url',
help='The alternate Girder API URL used by workers to reach Girder. '
'This defaults to http://girder:8080/api/v1')
parser.add_argument(
'--worker-tmp-root', '--tmp', default='/tmp/girder_worker',
help='The path to use for the girder_worker tmp_root. This must be '
'reachable by the girder and worker docker containers. It cannot be '
'a top-level directory.')
parser.add_argument('--verbose', '-v', action='count', default=0)
# Should we add an optional url or host value for rmq and mongo?
# Should we allow installing git repos in a local directory to make it
# easier to develop python and javascript?
# We should figure out how to run the ctests
# Add a provisioning step to copy sample data (possibly by mounting the
# appropriate host directory if it exists).
args = parser.parse_args()
if args.verbose >= 2:
print('Parsed arguments: %r' % args)
if args.image:
for imagestr in args.image:
key, tag, dockerfile = imagestr.split(':')
ImageList[key]['tag'] = tag
ImageList[key]['dockerfile'] = dockerfile
if args.only:
only = set(['cli'] + args.only.split(','))
for key in list(ImageList.keys()):
if key not in only:
del ImageList[key]
if args.info or args.command == 'info':
show_info()
if args.command == 'provision':
args.command = 'start'
args.provision = True
if args.pull or args.command == 'pull':
images_repull(**vars(args))
if args.build or args.command == 'build':
images_build(args.retry)
if args.command in ('stop', 'restart', 'rm', 'remove'):
containers_stop(remove=args.command in ('rm', 'remove'))
if args.command in ('start', 'restart'):
# Migration of ~/.histomicstk to ~/.dsa
keys = ['assetstore', 'logs', 'mongodb_path']
oldpath, newpath = '~/.histomicstk', '~/.dsa'
if (os.path.isdir(os.path.expanduser(oldpath)) and
not os.path.isdir(os.path.expanduser(newpath)) and
not any(getattr(args, key, '').startswith(oldpath) for key in keys) and
any(getattr(args, key, '').startswith(newpath) for key in keys)):
os.rename(os.path.expanduser(oldpath), os.path.expanduser(newpath))
containers_start(**vars(args))
if args.command in ('status', ) or args.status:
containers_status(**vars(args))
| [
"[email protected]"
] | |
d064af31ff8dfcb1bbf2989110897c624381d5eb | 7343ece3b82ac87a594865c4074623b45b0297b4 | /tests/rest/admin/test_federation.py | 4c7864c629f3e868f98b23586f50b11b14144dd4 | [
"Apache-2.0"
] | permissive | matrix-org/synapse | a00111f83310783b78e2996557f8bbae4d9fb229 | d35bed8369514fe727b4fe1afb68f48cc8b2655a | refs/heads/develop | 2023-09-05T05:24:20.808942 | 2023-09-04T16:14:09 | 2023-09-04T16:14:09 | 22,844,864 | 12,215 | 2,869 | Apache-2.0 | 2023-09-14T15:20:48 | 2014-08-11T15:51:42 | Python | UTF-8 | Python | false | false | 29,661 | py | # Copyright 2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
from parameterized import parameterized
from twisted.test.proto_helpers import MemoryReactor
import synapse.rest.admin
from synapse.api.errors import Codes
from synapse.rest.client import login, room
from synapse.server import HomeServer
from synapse.types import JsonDict
from synapse.util import Clock
from tests import unittest
class FederationTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.url = "/_synapse/admin/v1/federation/destinations"
@parameterized.expand(
[
("GET", "/_synapse/admin/v1/federation/destinations"),
("GET", "/_synapse/admin/v1/federation/destinations/dummy"),
(
"POST",
"/_synapse/admin/v1/federation/destinations/dummy/reset_connection",
),
]
)
def test_requester_is_no_admin(self, method: str, url: str) -> None:
"""If the user is not a server admin, an error 403 is returned."""
self.register_user("user", "pass", admin=False)
other_user_tok = self.login("user", "pass")
channel = self.make_request(
method,
url,
content={},
access_token=other_user_tok,
)
self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
"""If parameters are invalid, an error is returned."""
# negative limit
channel = self.make_request(
"GET",
self.url + "?limit=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
channel = self.make_request(
"GET",
self.url + "?from=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# unkown order_by
channel = self.make_request(
"GET",
self.url + "?order_by=bar",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid search order
channel = self.make_request(
"GET",
self.url + "?dir=bar",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid destination
channel = self.make_request(
"GET",
self.url + "/dummy",
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
# invalid destination
channel = self.make_request(
"POST",
self.url + "/dummy/reset_connection",
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_limit(self) -> None:
"""Testing list of destinations with limit"""
number_destinations = 20
self._create_destinations(number_destinations)
channel = self.make_request(
"GET",
self.url + "?limit=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), 5)
self.assertEqual(channel.json_body["next_token"], "5")
self._check_fields(channel.json_body["destinations"])
def test_from(self) -> None:
"""Testing list of destinations with a defined starting point (from)"""
number_destinations = 20
self._create_destinations(number_destinations)
channel = self.make_request(
"GET",
self.url + "?from=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), 15)
self.assertNotIn("next_token", channel.json_body)
self._check_fields(channel.json_body["destinations"])
def test_limit_and_from(self) -> None:
"""Testing list of destinations with a defined starting point and limit"""
number_destinations = 20
self._create_destinations(number_destinations)
channel = self.make_request(
"GET",
self.url + "?from=5&limit=10",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(channel.json_body["next_token"], "15")
self.assertEqual(len(channel.json_body["destinations"]), 10)
self._check_fields(channel.json_body["destinations"])
def test_next_token(self) -> None:
"""Testing that `next_token` appears at the right place"""
number_destinations = 20
self._create_destinations(number_destinations)
# `next_token` does not appear
# Number of results is the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=20",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), number_destinations)
self.assertNotIn("next_token", channel.json_body)
# `next_token` does not appear
# Number of max results is larger than the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=21",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), number_destinations)
self.assertNotIn("next_token", channel.json_body)
# `next_token` does appear
# Number of max results is smaller than the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=19",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), 19)
self.assertEqual(channel.json_body["next_token"], "19")
# Check
# Set `from` to value of `next_token` for request remaining entries
# `next_token` does not appear
channel = self.make_request(
"GET",
self.url + "?from=19",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_destinations)
self.assertEqual(len(channel.json_body["destinations"]), 1)
self.assertNotIn("next_token", channel.json_body)
def test_list_all_destinations(self) -> None:
"""List all destinations."""
number_destinations = 5
self._create_destinations(number_destinations)
channel = self.make_request(
"GET",
self.url,
{},
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(number_destinations, len(channel.json_body["destinations"]))
self.assertEqual(number_destinations, channel.json_body["total"])
# Check that all fields are available
self._check_fields(channel.json_body["destinations"])
def test_order_by(self) -> None:
"""Testing order list with parameter `order_by`"""
def _order_test(
expected_destination_list: List[str],
order_by: Optional[str],
dir: Optional[str] = None,
) -> None:
"""Request the list of destinations in a certain order.
Assert that order is what we expect
Args:
expected_destination_list: The list of user_id in the order
we expect to get back from the server
order_by: The type of ordering to give the server
dir: The direction of ordering to give the server
"""
url = f"{self.url}?"
if order_by is not None:
url += f"order_by={order_by}&"
if dir is not None and dir in ("b", "f"):
url += f"dir={dir}"
channel = self.make_request(
"GET",
url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], len(expected_destination_list))
returned_order = [
row["destination"] for row in channel.json_body["destinations"]
]
self.assertEqual(expected_destination_list, returned_order)
self._check_fields(channel.json_body["destinations"])
# create destinations
dest = [
("sub-a.example.com", 100, 300, 200, 300),
("sub-b.example.com", 200, 200, 100, 100),
("sub-c.example.com", 300, 100, 300, 200),
]
for (
destination,
failure_ts,
retry_last_ts,
retry_interval,
last_successful_stream_ordering,
) in dest:
self._create_destination(
destination,
failure_ts,
retry_last_ts,
retry_interval,
last_successful_stream_ordering,
)
# order by default (destination)
_order_test([dest[0][0], dest[1][0], dest[2][0]], None)
_order_test([dest[0][0], dest[1][0], dest[2][0]], None, "f")
_order_test([dest[2][0], dest[1][0], dest[0][0]], None, "b")
# order by destination
_order_test([dest[0][0], dest[1][0], dest[2][0]], "destination")
_order_test([dest[0][0], dest[1][0], dest[2][0]], "destination", "f")
_order_test([dest[2][0], dest[1][0], dest[0][0]], "destination", "b")
# order by failure_ts
_order_test([dest[0][0], dest[1][0], dest[2][0]], "failure_ts")
_order_test([dest[0][0], dest[1][0], dest[2][0]], "failure_ts", "f")
_order_test([dest[2][0], dest[1][0], dest[0][0]], "failure_ts", "b")
# order by retry_last_ts
_order_test([dest[2][0], dest[1][0], dest[0][0]], "retry_last_ts")
_order_test([dest[2][0], dest[1][0], dest[0][0]], "retry_last_ts", "f")
_order_test([dest[0][0], dest[1][0], dest[2][0]], "retry_last_ts", "b")
# order by retry_interval
_order_test([dest[1][0], dest[0][0], dest[2][0]], "retry_interval")
_order_test([dest[1][0], dest[0][0], dest[2][0]], "retry_interval", "f")
_order_test([dest[2][0], dest[0][0], dest[1][0]], "retry_interval", "b")
# order by last_successful_stream_ordering
_order_test(
[dest[1][0], dest[2][0], dest[0][0]], "last_successful_stream_ordering"
)
_order_test(
[dest[1][0], dest[2][0], dest[0][0]], "last_successful_stream_ordering", "f"
)
_order_test(
[dest[0][0], dest[2][0], dest[1][0]], "last_successful_stream_ordering", "b"
)
def test_search_term(self) -> None:
"""Test that searching for a destination works correctly"""
def _search_test(
expected_destination: Optional[str],
search_term: str,
) -> None:
"""Search for a destination and check that the returned destinationis a match
Args:
expected_destination: The room_id expected to be returned by the API.
Set to None to expect zero results for the search
search_term: The term to search for room names with
"""
url = f"{self.url}?destination={search_term}"
channel = self.make_request(
"GET",
url.encode("ascii"),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
# Check that destinations were returned
self.assertTrue("destinations" in channel.json_body)
self._check_fields(channel.json_body["destinations"])
destinations = channel.json_body["destinations"]
# Check that the expected number of destinations were returned
expected_destination_count = 1 if expected_destination else 0
self.assertEqual(len(destinations), expected_destination_count)
self.assertEqual(channel.json_body["total"], expected_destination_count)
if expected_destination:
# Check that the first returned destination is correct
self.assertEqual(expected_destination, destinations[0]["destination"])
number_destinations = 3
self._create_destinations(number_destinations)
# Test searching
_search_test("sub0.example.com", "0")
_search_test("sub0.example.com", "sub0")
_search_test("sub1.example.com", "1")
_search_test("sub1.example.com", "1.")
# Test case insensitive
_search_test("sub0.example.com", "SUB0")
_search_test(None, "foo")
_search_test(None, "bar")
def test_get_single_destination_with_retry_timings(self) -> None:
"""Get one specific destination which has retry timings."""
self._create_destinations(1)
channel = self.make_request(
"GET",
self.url + "/sub0.example.com",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("sub0.example.com", channel.json_body["destination"])
# Check that all fields are available
# convert channel.json_body into a List
self._check_fields([channel.json_body])
def test_get_single_destination_no_retry_timings(self) -> None:
"""Get one specific destination which has no retry timings."""
self._create_destination("sub0.example.com")
channel = self.make_request(
"GET",
self.url + "/sub0.example.com",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual("sub0.example.com", channel.json_body["destination"])
self.assertEqual(0, channel.json_body["retry_last_ts"])
self.assertEqual(0, channel.json_body["retry_interval"])
self.assertIsNone(channel.json_body["failure_ts"])
self.assertIsNone(channel.json_body["last_successful_stream_ordering"])
def test_destination_reset_connection(self) -> None:
"""Reset timeouts and wake up destination."""
self._create_destination("sub0.example.com", 100, 100, 100)
channel = self.make_request(
"POST",
self.url + "/sub0.example.com/reset_connection",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
retry_timings = self.get_success(
self.store.get_destination_retry_timings("sub0.example.com")
)
self.assertIsNone(retry_timings)
def test_destination_reset_connection_not_required(self) -> None:
"""Try to reset timeouts of a destination with no timeouts and get an error."""
self._create_destination("sub0.example.com", None, 0, 0)
channel = self.make_request(
"POST",
self.url + "/sub0.example.com/reset_connection",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(
"The retry timing does not need to be reset for this destination.",
channel.json_body["error"],
)
def _create_destination(
self,
destination: str,
failure_ts: Optional[int] = None,
retry_last_ts: int = 0,
retry_interval: int = 0,
last_successful_stream_ordering: Optional[int] = None,
) -> None:
"""Create one specific destination
Args:
destination: the destination we have successfully sent to
failure_ts: when the server started failing (ms since epoch)
retry_last_ts: time of last retry attempt in unix epoch ms
retry_interval: how long until next retry in ms
last_successful_stream_ordering: the stream_ordering of the most
recent successfully-sent PDU
"""
self.get_success(
self.store.set_destination_retry_timings(
destination, failure_ts, retry_last_ts, retry_interval
)
)
if last_successful_stream_ordering is not None:
self.get_success(
self.store.set_destination_last_successful_stream_ordering(
destination, last_successful_stream_ordering
)
)
def _create_destinations(self, number_destinations: int) -> None:
"""Create a number of destinations
Args:
number_destinations: Number of destinations to be created
"""
for i in range(0, number_destinations):
dest = f"sub{i}.example.com"
self._create_destination(dest, 50, 50, 50, 100)
def _check_fields(self, content: List[JsonDict]) -> None:
"""Checks that the expected destination attributes are present in content
Args:
content: List that is checked for content
"""
for c in content:
self.assertIn("destination", c)
self.assertIn("retry_last_ts", c)
self.assertIn("retry_interval", c)
self.assertIn("failure_ts", c)
self.assertIn("last_successful_stream_ordering", c)
class DestinationMembershipTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets,
login.register_servlets,
room.register_servlets,
]
def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
self.store = hs.get_datastores().main
self.admin_user = self.register_user("admin", "pass", admin=True)
self.admin_user_tok = self.login("admin", "pass")
self.dest = "sub0.example.com"
self.url = f"/_synapse/admin/v1/federation/destinations/{self.dest}/rooms"
# Record that we successfully contacted a destination in the DB.
self.get_success(
self.store.set_destination_retry_timings(self.dest, None, 0, 0)
)
def test_requester_is_no_admin(self) -> None:
"""If the user is not a server admin, an error 403 is returned."""
self.register_user("user", "pass", admin=False)
other_user_tok = self.login("user", "pass")
channel = self.make_request(
"GET",
self.url,
access_token=other_user_tok,
)
self.assertEqual(403, channel.code, msg=channel.json_body)
self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"])
def test_invalid_parameter(self) -> None:
"""If parameters are invalid, an error is returned."""
# negative limit
channel = self.make_request(
"GET",
self.url + "?limit=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# negative from
channel = self.make_request(
"GET",
self.url + "?from=-5",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid search order
channel = self.make_request(
"GET",
self.url + "?dir=bar",
access_token=self.admin_user_tok,
)
self.assertEqual(400, channel.code, msg=channel.json_body)
self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
# invalid destination
channel = self.make_request(
"GET",
"/_synapse/admin/v1/federation/destinations/%s/rooms" % ("invalid",),
access_token=self.admin_user_tok,
)
self.assertEqual(404, channel.code, msg=channel.json_body)
self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"])
def test_limit(self) -> None:
"""Testing list of destinations with limit"""
number_rooms = 5
self._create_destination_rooms(number_rooms)
channel = self.make_request(
"GET",
self.url + "?limit=3",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), 3)
self.assertEqual(channel.json_body["next_token"], "3")
self._check_fields(channel.json_body["rooms"])
def test_from(self) -> None:
"""Testing list of rooms with a defined starting point (from)"""
number_rooms = 10
self._create_destination_rooms(number_rooms)
channel = self.make_request(
"GET",
self.url + "?from=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), 5)
self.assertNotIn("next_token", channel.json_body)
self._check_fields(channel.json_body["rooms"])
def test_limit_and_from(self) -> None:
"""Testing list of rooms with a defined starting point and limit"""
number_rooms = 10
self._create_destination_rooms(number_rooms)
channel = self.make_request(
"GET",
self.url + "?from=3&limit=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(channel.json_body["next_token"], "8")
self.assertEqual(len(channel.json_body["rooms"]), 5)
self._check_fields(channel.json_body["rooms"])
def test_order_direction(self) -> None:
"""Testing order list with parameter `dir`"""
number_rooms = 4
self._create_destination_rooms(number_rooms)
# get list in forward direction
channel_asc = self.make_request(
"GET",
self.url + "?dir=f",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel_asc.code, msg=channel_asc.json_body)
self.assertEqual(channel_asc.json_body["total"], number_rooms)
self.assertEqual(number_rooms, len(channel_asc.json_body["rooms"]))
self._check_fields(channel_asc.json_body["rooms"])
# get list in backward direction
channel_desc = self.make_request(
"GET",
self.url + "?dir=b",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel_desc.code, msg=channel_desc.json_body)
self.assertEqual(channel_desc.json_body["total"], number_rooms)
self.assertEqual(number_rooms, len(channel_desc.json_body["rooms"]))
self._check_fields(channel_desc.json_body["rooms"])
# test that both lists have different directions
for i in range(0, number_rooms):
self.assertEqual(
channel_asc.json_body["rooms"][i]["room_id"],
channel_desc.json_body["rooms"][number_rooms - 1 - i]["room_id"],
)
def test_next_token(self) -> None:
"""Testing that `next_token` appears at the right place"""
number_rooms = 5
self._create_destination_rooms(number_rooms)
# `next_token` does not appear
# Number of results is the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=5",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), number_rooms)
self.assertNotIn("next_token", channel.json_body)
# `next_token` does not appear
# Number of max results is larger than the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=6",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), number_rooms)
self.assertNotIn("next_token", channel.json_body)
# `next_token` does appear
# Number of max results is smaller than the number of entries
channel = self.make_request(
"GET",
self.url + "?limit=4",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), 4)
self.assertEqual(channel.json_body["next_token"], "4")
# Check
# Set `from` to value of `next_token` for request remaining entries
# `next_token` does not appear
channel = self.make_request(
"GET",
self.url + "?from=4",
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(len(channel.json_body["rooms"]), 1)
self.assertNotIn("next_token", channel.json_body)
def test_destination_rooms(self) -> None:
"""Testing that request the list of rooms is successfully."""
number_rooms = 3
self._create_destination_rooms(number_rooms)
channel = self.make_request(
"GET",
self.url,
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(channel.json_body["total"], number_rooms)
self.assertEqual(number_rooms, len(channel.json_body["rooms"]))
self._check_fields(channel.json_body["rooms"])
def _create_destination_rooms(self, number_rooms: int) -> None:
"""Create a number rooms for destination
Args:
number_rooms: Number of rooms to be created
"""
for _ in range(0, number_rooms):
room_id = self.helper.create_room_as(
self.admin_user, tok=self.admin_user_tok
)
self.get_success(
self.store.store_destination_rooms_entries((self.dest,), room_id, 1234)
)
def _check_fields(self, content: List[JsonDict]) -> None:
"""Checks that the expected room attributes are present in content
Args:
content: List that is checked for content
"""
for c in content:
self.assertIn("room_id", c)
self.assertIn("stream_ordering", c)
| [
"[email protected]"
] | |
aca98bf3d119fcd3caacbf7ae0055107e799ef81 | e97c25c2e68fbe99b138c0d8c9fff6f17e8860de | /Architecture1/Multiple/Main2.py | 63c27c81615a3b67b55f3f77ac87c5cc46fd80ac | [] | no_license | brycexu/Binarized_Neural_Networks_with_Learned_Pooling-Strategy | 24ab124316458c775011e525d038440f61eccfb1 | 4bdf8e9cdd1f7e17a256bf6efddc874e88e8d4a4 | refs/heads/master | 2020-07-25T14:29:05.228684 | 2019-12-23T11:17:52 | 2019-12-23T11:17:52 | 208,323,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,688 | py | import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import Model as model
from torch.autograd import Variable
import time
from Logger import Logger
import numpy as np
import matplotlib.pyplot as plt
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
best_acc = 0
start_epoch = 0
logger = Logger('./logs2')
Train_Loss = []
Test_Loss = []
Train_Accuracy = []
Test_Accuracy = []
# Dataset
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
trainset = torchvision.datasets.CIFAR10(root='/export/livia/data/xxu/CIFAR10', train=True, download=False, transform=transform_train)
# /home/AN96120/brycexu/CIFAR10
# /export/livia/data/xxu/CIFAR10
trainset, valset = torch.utils.data.random_split(trainset, [40000, 10000])
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0)
valloader = torch.utils.data.DataLoader(valset, batch_size=128, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='/export/livia/data/xxu/CIFAR10', train=False, download=False, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=0)
# Model
print('==> Building model..')
model = model.MutipleBNN()
model = nn.DataParallel(model)
model = model.to(device)
criterion = nn.CrossEntropyLoss(reduction='mean')
optimizer1 = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
optimizer2 = torch.optim.Adam(model.parameters(), lr=0.0001, betas=(0.5, 0.999), weight_decay=0.0003)
def update_lr(optimizer):
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * 0.1
# Training
def train(epoch):
global Train_Loss, Train_Accuracy
print('\nEpoch: %d' % epoch)
model.train()
train_loss = 0
correct = 0
total = 0
for name, param in model.named_parameters():
if name == 'module.convolutions.0.alpha' or name == 'module.convolutions.1.alpha' \
or name == 'module.convolutions.2.alpha' or name == 'module.convolutions.3.alpha' \
or name == 'module.convolutions.4.alpha' or name == 'module.convolutions.5.alpha' \
or name == 'module.convolutions.6.alpha' or name == 'module.convolutions.7.alpha' \
or name == 'module.convolutions.8.alpha' or name == 'module.convolutions.9.alpha':
param.requires_grad = False
else:
param.requires_grad = True
start = time.time()
for batch_index, (inputs, targets) in enumerate(trainloader):
inputs, targets = inputs.to(device), targets.to(device)
inputs = Variable(inputs)
targets = Variable(targets)
# Forward
outputs = model(inputs)
loss = criterion(outputs, targets)
# Backward and Optimize
optimizer1.zero_grad()
loss.backward()
for p in list(model.parameters()):
if hasattr(p, 'org'):
p.data.copy_(p.org)
optimizer1.step()
for p in list(model.parameters()):
if hasattr(p, 'org'):
p.org.copy_(p.data.clamp_(-1, 1))
# Results
train_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
train_loss = train_loss / (40000 / 128)
end = time.time()
print('Training Time: %.1f' % (end - start))
print('Loss: %.3f | Accuracy: %.3f' % (train_loss, 100. * correct / total))
# Plot the model
info = {'train_loss': train_loss}
for tag, value in info.items():
logger.scalar_summary(tag, value, epoch + 1)
Train_Loss.append(train_loss)
Train_Accuracy.append(100. * correct / total)
# Update lr
if epoch == 200 or epoch == 400 or epoch == 600:
update_lr(optimizer1)
def val(epoch):
model.train()
val_loss = 0
correct = 0
total = 0
for name, param in model.named_parameters():
if name == 'module.convolutions.0.alpha' or name == 'module.convolutions.1.alpha' \
or name == 'module.convolutions.2.alpha' or name == 'module.convolutions.3.alpha' \
or name == 'module.convolutions.4.alpha' or name == 'module.convolutions.5.alpha' \
or name == 'module.convolutions.6.alpha' or name == 'module.convolutions.7.alpha' \
or name == 'module.convolutions.8.alpha' or name == 'module.convolutions.9.alpha':
param.requires_grad = True
else:
param.requires_grad = False
start = time.time()
for batch_index, (inputs, targets) in enumerate(valloader):
inputs, targets = inputs.to(device), targets.to(device)
inputs = Variable(inputs)
targets = Variable(targets)
# Forward
outputs = model(inputs)
loss = criterion(outputs, targets)
# Backward and Optimize
optimizer2.zero_grad()
loss.backward()
for p in list(model.parameters()):
if hasattr(p, 'org'):
p.data.copy_(p.org)
optimizer2.step()
# Results
val_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
end = time.time()
print('Validating Time: %.1f' % (end - start))
print('Accuracy: %.3f' % (100. * correct / total))
def test(epoch):
global best_acc, Test_Loss, Test_Accuracy
model.eval()
test_loss = 0
correct = 0
total = 0
start = time.time()
with torch.no_grad():
for batch_index, (inputs, targets) in enumerate(testloader):
inputs, targets = inputs.to(device), targets.to(device)
inputs = Variable(inputs)
targets = Variable(targets)
# Forward
outputs = model(inputs)
loss = criterion(outputs, targets)
# Results
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
test_loss = test_loss / (10000 / 100)
end = time.time()
print('Testing Time: %1.f' % (end - start))
print('Loss: %.3f | Accuracy: %.3f' % (test_loss, 100. * correct / total))
# Save the model
acc = 100. * correct / total
if acc > best_acc:
best_acc = acc
# Plot the model
info = {'test_loss': test_loss, 'test_accuracy': acc}
for tag, value in info.items():
logger.scalar_summary(tag, value, epoch + 1)
Test_Loss.append(test_loss)
Test_Accuracy.append(100. * correct / total)
epochs = 800
for epoch in range(start_epoch, start_epoch + epochs):
train(epoch)
val(epoch)
test(epoch)
x1 = np.arange(0, epochs)
y10 = Train_Loss
y11 = Test_Loss
x2 = np.arange(0, epochs)
y20 = Train_Accuracy
y21 = Test_Accuracy
plt.subplot(2, 1, 1)
plt.title('Arch1_Multiple')
plt.plot(x1, y10, 'o-', color='b', label='Train_Loss')
plt.plot(x1, y11, 'o-', color='g', label='Test_Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(x2, y20, 'o-', color='k', label='Train_Acc')
plt.plot(x2, y21, 'o-', color='r', label='Test_Acc')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend()
plt.tight_layout()
plt.savefig("Result2.jpg")
plt.show()
| [
"[email protected]"
] | |
556f566fcc8294adf4fa2c33c29d7178f2a6f529 | d0600d512b805c16269faf8e398ccd61aa04a724 | /supervised_learning/regularization/logisteic_regression_with_regularization.py | 125be52f46c5509ab55c1fbbf955002d823e9c74 | [] | no_license | iamMHZ/My-ML-and-DL-experiments | 1ef16b983f46d8440a15019c4cc597cc98f3a0ac | 4b2d1b1e3a9432c0a88796e9c1c489e42509a2e4 | refs/heads/master | 2021-07-11T07:45:12.235042 | 2021-03-12T09:37:37 | 2021-03-12T09:37:37 | 237,808,688 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,732 | py | """
implementation of Logistic regression with L2 regularization
"""
import matplotlib.pyplot as plt
import numpy as np
def load_data():
# https://archive.ics.uci.edu/ml/datasets/Haberman's+Survival
data_file = np.genfromtxt('../../utils/datasets/supervised dataset/haberman.txt', delimiter=',')
X = data_file[:, :2]
y = data_file[:, 3]
# labels are 1 (survived) and 2 (died)
# change 2 to 0
y[y == 2] = 0
return X, y
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def compute_loss(y_true, y_pred, weights, landa):
# calculate loss
epoch_loss = (-y_true * np.log(y_pred)) - ((1 - y_true) * np.log(1 - y_pred))
epoch_loss = np.sum(epoch_loss)
# add L2 regularization
# W.T@W = sum(W^2)
epoch_loss += 0.5 * landa * (np.matmul(weights.T, weights)[0])
# No regularization on the bias so cancel it
epoch_loss -= weights[0]**2
# Replace NaN with zero and infinity with large finite number
# because the -log(x) and -log(1-x) have the tendency to return NaN or INF so we need to make it a number
# making sure that the over all loss does not become INF
epoch_loss = np.nan_to_num(epoch_loss)
return epoch_loss
def compute_gradients(X, y_true, y_pred, weights, landa):
error = y_pred - y_true
# compute gradients
gradients = np.matmul(X.T, error)
# the regularization derivative too
gradients = gradients + (landa * weights)
# Dont apply regularization on the bias so, cancel it
gradients[0] -= landa * weights[0]
return gradients
def fit(X, y, learning_rate=0.0001, epochs=100, landa=0.01):
# initialize the weights
weights = np.random.random((X.shape[1], 1))
losses = []
for i in range(epochs):
# make a prediction
y_pred = sigmoid(np.matmul(X, weights))
epoch_loss = compute_loss(y, y_pred, weights, landa)
# update the wights
gradients = compute_gradients(X, y, y_pred, weights, landa)
weights += -learning_rate * gradients
print(f'Epoch = {i} , Loss = {epoch_loss}')
losses.append(epoch_loss)
# plot the training loss
plt.plot(np.arange(0, epochs), losses)
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
print('Weights: ' + str(weights))
if __name__ == '__main__':
#X, y = load_data()
from sklearn.datasets import make_blobs
X, y = make_blobs(n_samples=1000, n_features=2, centers=2, random_state=14)
# add a column for the bias (bias trick) ==> everything is vectorized
ones_column = np.ones((X.shape[0], 1), np.float)
X = np.append(ones_column, X, axis=1)
y = y.reshape(y.shape[0], 1)
fit(X, y, learning_rate=0.0001, epochs=100, landa=0)
| [
"[email protected]"
] | |
7e471b82d7e87330b02b3ceec2b761f1e46f40d2 | af101b467134e10270bb72d02f41f07daa7f57d8 | /mmagic/models/editors/eg3d/__init__.py | 26ae196273404bdf947578e49c4fcc92287f06b4 | [
"Apache-2.0"
] | permissive | open-mmlab/mmagic | 4d864853417db300de4dfe7e83ce380fd1557a23 | a382f143c0fd20d227e1e5524831ba26a568190d | refs/heads/main | 2023-08-31T14:40:24.936423 | 2023-08-30T05:05:56 | 2023-08-30T05:05:56 | 203,999,962 | 1,370 | 192 | Apache-2.0 | 2023-09-14T11:39:18 | 2019-08-23T13:04:29 | Jupyter Notebook | UTF-8 | Python | false | false | 324 | py | # Copyright (c) OpenMMLab. All rights reserved.
from .camera import GaussianCamera, UniformCamera
from .dual_discriminator import DualDiscriminator
from .eg3d import EG3D
from .eg3d_generator import TriplaneGenerator
__all__ = [
'DualDiscriminator', 'TriplaneGenerator', 'EG3D', 'UniformCamera',
'GaussianCamera'
]
| [
"[email protected]"
] | |
2da4f79b8bd8154cc469bc70d68325c0ad501612 | eb02ead830632738f9723ba14c495e50a3bbf1a2 | /rohdeschwarz/bin/osp.py | 65a6ed5bc7f32ee04cf9a3fc583e3eb04f82871c | [] | no_license | hgrf/rohdeschwarz | 53461fe2f38156448aa6d2eecdee93c0ff3c77c2 | 20ccf128f8bb1a35b83e803e7d5e742046548ee8 | refs/heads/master | 2023-03-19T15:57:49.175428 | 2019-08-23T14:52:57 | 2019-08-23T14:52:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,678 | py | from rohdeschwarz.instruments.ospswitch import OspSwitch
from ruamel import yaml
import argparse
import code
import datetime
import os
import sys
def main():
parser = argparse.ArgumentParser(description='Connect to a Rohde & Schwarz OSP Switch')
parser.add_argument('--visa', metavar='bus', default=False,
help="use VISA with 'bus'")
parser.add_argument('--address', default='127.0.0.1',
help='instrument address')
parser.add_argument('--port', default=5025, type=int,
help='port (TCP only)')
parser.add_argument('--timeout', default=5000, type=int,
help='default instrument timeout (ms)')
parser.add_argument('--driver')
parser.add_argument('--log', default='',
help='SCPI command log filename')
parser.add_argument('--log-to-stdout', action='store_true',
help='print all SCPI IO to stdout')
args = parser.parse_args()
if args.log and args.log_to_stdout:
print('error: cannot use both --log and --log-to-stdout')
parser.print_help()
if not args.driver:
print('Switch matrix driver is required')
parser.print_help()
sys.exit(0)
switch_dict = {}
try:
with open(args.driver, 'r') as f:
switch_dict = yaml.safe_load(f.read())
assert switch_dict
except:
print('Could not read driver file')
sys.exit(0)
osp = OspSwitch(switch_dict)
try:
if args.visa:
osp.open(args.visa, args.address)
else:
osp.open_tcp(args.address, args.port)
if args.timeout:
osp.timeout_ms = args.timeout
if osp.connected():
print("connected: {0}".format(osp.id_string()))
if args.log:
osp.open_log(args.log)
osp.log.write('{0}\n'.format(datetime.datetime.now()))
osp.log.write('--------------------------\n\n')
osp.print_info()
elif args.log_to_stdout:
vna.log = sys.stdout
code.interact('', local=locals())
else:
print('Could not connect to instrument\n')
parser.print_help()
except FileNotFoundError:
print('Could not find driver')
parser.print_help()
except SystemExit:
pass
except:
raise Exception('Error connecting to instrument')
parser.print_help()
finally:
if osp.log:
osp.close_log()
if osp.connected():
osp.close()
if __name__ == "__main__":
main()
sys.exit(0)
| [
"[email protected]"
] | |
fa00adef42307ed4652d9211eee22ed83e31fabb | adb295bf248ded84d2c126d73c58b570af440dc6 | /markers/requires.py | 20d9b626d21fb44e65878609473a70cd685a19f6 | [] | no_license | sshveta/cfme_tests | eaeaf0076e87dd6c2c960887b242cb435cab5151 | 51bb86fda7d897e90444a6a0380a5aa2c61be6ff | refs/heads/master | 2021-03-30T22:30:12.476326 | 2017-04-26T22:47:25 | 2017-04-26T22:47:25 | 17,754,019 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,064 | py | """requires_test(test_name_or_nodeid): Mark a test as requiring another test
If another test is required to have run and passed before a suite of tests has
any hope of succeeding, such as a smoke test, apply this mark to those tests.
It takes a test name as the only positional argument. In the event that the
test name is ambiguous, a full py.test nodeid can be used. A test's nodeid can
be found by inspecting the request.node.nodeid attribute inside the required
test item.
"""
import pytest
_no_mark_arg_err = '%s mark required test name or nodeid as first argument'
def pytest_configure(config):
config.addinivalue_line("markers", __doc__)
def _find_test_in_reports(test_id, reports):
# nodeids end with the test name, so the description of this mark
# oversimplifies things a little bit. The actual check for a test
# match is that any preceding test nodeid ends with the arg passed
# to the mark, so we can easily match the test name, test nodeid, and
# anything in between.
return any([report.nodeid.endswith(test_id) for report in reports])
def pytest_runtest_setup(item):
mark = 'requires_test'
if mark not in item.keywords:
# mark wasn't invoked, short out
return
else:
try:
test_id = item.keywords[mark].args[0]
except IndexError:
# mark called incorrectly, explode
raise Exception(_no_mark_arg_err % mark)
reporter = item.config.pluginmanager.getplugin('terminalreporter')
passed = reporter.stats.get('passed', [])
failed = reporter.stats.get('failed', [])
skipped = reporter.stats.get('skipped', [])
if _find_test_in_reports(test_id, passed):
# Required test passed, short out
return
if _find_test_in_reports(test_id, failed):
error_verb = 'failed'
elif _find_test_in_reports(test_id, skipped):
error_verb = 'was skipped'
else:
error_verb = 'not yet run or does not exist'
errmsg = 'required test %s %s' % (test_id, error_verb)
pytest.skip(errmsg)
| [
"[email protected]"
] | |
519dadf3442f8806b441942ada5df14466a94155 | 8b9e9de996cedd31561c14238fe655c202692c39 | /tree/leetcode_Implement_Trie_Prefix_Tree.py | 3e7a8a4a2d448deaec1eb29454d24f582767a17c | [] | no_license | monkeylyf/interviewjam | 0049bc1d79e6ae88ca6d746b05d07b9e65bc9983 | 33c623f226981942780751554f0593f2c71cf458 | refs/heads/master | 2021-07-20T18:25:37.537856 | 2021-02-19T03:26:16 | 2021-02-19T03:26:16 | 6,741,986 | 59 | 31 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | """Implement trie prefix tree
leetcode
Implement a trie with insert, search, and startsWith methods.
"""
class TrieNode(object):
__slots__ = ('val', 'end', 'children')
def __init__(self):
"""
Initialize your data structure here.
"""
self.val = None
self.end = False
self.children = {}
class Trie(object):
def __init__(self):
""""""
self.root = TrieNode()
self.root.val = ''
def insert(self, word):
"""
Inserts a word into the trie.
:type word: str
:rtype: void
"""
node = self.root
for char in word:
try:
child = node.children[char]
except KeyError:
child = TrieNode()
child.val = char
node.children[char] = child
node = child
node.end = True
def _end_node(self, string):
"""Return the last node given string path.
:param string: str
"""
node = self.root
for char in string:
node = node.children[char]
return node
def search(self, word):
"""Returns if the word is in the trie.
:type word: str
:rtype: bool
"""
try:
return self._end_node(word).end
except KeyError:
return False
def startsWith(self, prefix):
"""Returns if there is any word starts with the given prefix.
:type prefix: str
:rtype: bool
"""
try:
self._end_node(prefix)
return True
except KeyError:
return False
def main():
trie = Trie()
trie.insert("somestring")
assert trie.search("somestring")
assert not trie.search("some")
assert trie.startsWith("some")
trie.insert('app')
assert trie.search("app")
trie.insert('apps')
assert trie.search("app")
trie.insert('apple')
assert trie.search("app")
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
7d6fe4baf49c9678097573db683c0a29bb674790 | d2616d89ae8d228ecb7c122f76e7754628674d3c | /CircuitPython_Libraries/adafruit-circuitpython-bundle-4.x-mpy-20200114/examples/lsm6dsox_simpletest.py | 68328396384a30b59583e3108c8057b0163306ae | [] | no_license | simsoon27/Microcontrollers | f95761ca081eefc2913068712dd4609bb02f01f7 | 3615ccefe4e649560d26b0a937dd583008dfee54 | refs/heads/master | 2023-06-18T02:11:35.016222 | 2021-07-16T18:18:35 | 2021-07-16T18:18:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | import time
import board
import busio
import adafruit_lsm6dsox
i2c = busio.I2C(board.SCL, board.SDA)
sox = adafruit_lsm6dsox.LSM6DSOX(i2c)
while True:
print("Acceleration: X:%.2f, Y: %.2f, Z: %.2f m/s^2"%(sox.acceleration))
print("Gyro X:%.2f, Y: %.2f, Z: %.2f degrees/s"%(sox.gyro))
print("")
time.sleep(0.5)
| [
"[email protected]"
] | |
8cb52db936784a8b9eacb4f885340210e48e38ce | 8e954507f612cb375dc55ed7f90896dea131af1b | /test/SConsGnu/GVars/GVarDecls/sconstest-gvardecls3.py | 2526ad653d3af0710efe01d263a881bd21f9968b | [
"MIT"
] | permissive | ptomulik/scons-gnu-build | 8c97ab397b67f58713e95c341608b91fb9c68e00 | 9c46908eed50679d7aaaaf472e324c97545ac837 | refs/heads/master | 2021-01-18T20:30:00.986201 | 2015-10-19T23:48:50 | 2015-10-20T10:58:57 | 6,583,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,030 | py | #
# Copyright (c) 2012-2014 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
__docformat__ = "restructuredText"
"""
Tests declaring variables with SConsGnu.GVar.GVarDecls() factory method.
"""
import TestSCons
##############################################################################
# GVarDecls(): Test 3 - using bare arguments instead of instances of _GVarDecl
##############################################################################
test = TestSCons.TestSCons()
test.dir_fixture('../../../../SConsGnu', 'site_scons/SConsGnu')
test.write('SConstruct',
"""
# SConstruct
from SConsGnu.GVars import GVarDecls, ENV, VAR, OPT
x = ( {'env_x' : 'env x default'}, ('var_x', None, 'var x default'), ('-x', {'dest' : 'opt_x', 'default' : 'opt x default'}) )
y = [ {'env_y' : 'env y default'}, ('var_y', None, 'var y default'), ('-y', {'dest' : 'opt_y', 'default' : 'opt y default'}) ]
list = []
list.append( GVarDecls(x = x, y = y) )
list.append( GVarDecls({'x' : x, 'y' : y}) )
i = 0
for v in list:
for c in ['x', 'y']:
print "GVARS[%d][%r].has_xxx_decl(ENV): %r" % (i, c, v[c].has_xxx_decl(ENV))
print "GVARS[%d][%r].has_xxx_decl(VAR): %r" % (i, c, v[c].has_xxx_decl(VAR))
print "GVARS[%d][%r].has_xxx_decl(OPT): %r" % (i, c, v[c].has_xxx_decl(OPT))
print "GVARS[%d][%r].get_xxx_key(ENV): %r" % (i, c, v[c].get_xxx_key(ENV))
print "GVARS[%d][%r].get_xxx_key(VAR): %r" % (i, c, v[c].get_xxx_key(VAR))
print "GVARS[%d][%r].get_xxx_key(OPT): %r" % (i, c, v[c].get_xxx_key(OPT))
print "GVARS[%d][%r].get_xxx_default(ENV): %r" % (i, c, v[c].get_xxx_default(ENV))
print "GVARS[%d][%r].get_xxx_default(VAR): %r" % (i, c, v[c].get_xxx_default(VAR))
print "GVARS[%d][%r].get_xxx_default(OPT): %r" % (i, c, v[c].get_xxx_default(OPT))
i += 1
""")
test.run()
lines = [
"GVARS[0]['x'].has_xxx_decl(ENV): True",
"GVARS[0]['x'].has_xxx_decl(VAR): True",
"GVARS[0]['x'].has_xxx_decl(OPT): True",
"GVARS[0]['x'].get_xxx_key(ENV): 'env_x'",
"GVARS[0]['x'].get_xxx_key(VAR): 'var_x'",
"GVARS[0]['x'].get_xxx_key(OPT): 'opt_x'",
"GVARS[0]['x'].get_xxx_default(ENV): 'env x default'",
"GVARS[0]['x'].get_xxx_default(VAR): 'var x default'",
"GVARS[0]['x'].get_xxx_default(OPT): 'opt x default'",
"GVARS[0]['y'].has_xxx_decl(ENV): True",
"GVARS[0]['y'].has_xxx_decl(VAR): True",
"GVARS[0]['y'].has_xxx_decl(OPT): True",
"GVARS[0]['y'].get_xxx_key(ENV): 'env_y'",
"GVARS[0]['y'].get_xxx_key(VAR): 'var_y'",
"GVARS[0]['y'].get_xxx_key(OPT): 'opt_y'",
"GVARS[0]['y'].get_xxx_default(ENV): 'env y default'",
"GVARS[0]['y'].get_xxx_default(VAR): 'var y default'",
"GVARS[0]['y'].get_xxx_default(OPT): 'opt y default'",
"GVARS[1]['x'].has_xxx_decl(ENV): True",
"GVARS[1]['x'].has_xxx_decl(VAR): True",
"GVARS[1]['x'].has_xxx_decl(OPT): True",
"GVARS[1]['x'].get_xxx_key(ENV): 'env_x'",
"GVARS[1]['x'].get_xxx_key(VAR): 'var_x'",
"GVARS[1]['x'].get_xxx_key(OPT): 'opt_x'",
"GVARS[1]['x'].get_xxx_default(ENV): 'env x default'",
"GVARS[1]['x'].get_xxx_default(VAR): 'var x default'",
"GVARS[1]['x'].get_xxx_default(OPT): 'opt x default'",
"GVARS[1]['y'].has_xxx_decl(ENV): True",
"GVARS[1]['y'].has_xxx_decl(VAR): True",
"GVARS[1]['y'].has_xxx_decl(OPT): True",
"GVARS[1]['y'].get_xxx_key(ENV): 'env_y'",
"GVARS[1]['y'].get_xxx_key(VAR): 'var_y'",
"GVARS[1]['y'].get_xxx_key(OPT): 'opt_y'",
"GVARS[1]['y'].get_xxx_default(ENV): 'env y default'",
"GVARS[1]['y'].get_xxx_default(VAR): 'var y default'",
"GVARS[1]['y'].get_xxx_default(OPT): 'opt y default'",
]
test.must_contain_all_lines(test.stdout(), lines)
test.pass_test()
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
| [
"[email protected]"
] | |
237d6cee2a3d139ef9b4113e6049d2bd85c8819c | 9510ff6d4df1a21cbd7abe66301f890ccd519714 | /captain/__init__.py | c7f41218ea4b7ca1e9e1b8be6d7ea467728c0dd1 | [
"MIT",
"BSD-2-Clause"
] | permissive | stmkza/arposandra | 37ea85d62411ba2fe9a14b664672eb081efde451 | e76f9886f98b3e5068b5f135be398c9e77bd3b65 | refs/heads/master | 2021-04-04T23:10:51.812090 | 2020-02-19T03:57:13 | 2020-02-19T04:00:54 | 248,500,182 | 0 | 0 | NOASSERTION | 2020-03-19T12:42:00 | 2020-03-19T12:41:59 | null | UTF-8 | Python | false | false | 3,525 | py | import os
import sys
import configparser
import gettext
import logging
import json
from collections import namedtuple
from tornado.web import Application
from tornado import locale
from . import database
from . import dispatch
from . import pages
from . import card_page
from . import pageutils
from . import tlinject
from . import news
from . import card_tracking
from . import event_tracker
from . import dict_aggregator
import libcard2
def readonly_app_path(*p):
return os.path.join(os.path.dirname(__file__), *p)
def create_runtime_info():
vi_class = namedtuple("runtime_info_t", ("app_revision", "host_id", "python_version"))
return vi_class(os.environ.get("AS_GIT_REVISION"), os.environ.get("AS_HOST_ID"), sys.version)
class DictionaryAccessProtocolImp(gettext.GNUTranslations):
class Fallback(object):
@classmethod
def gettext(cls, k):
return None
def __init__(self, fp):
super().__init__(fp)
self.add_fallback(self.Fallback)
def lookup_single_string(self, key):
return self.gettext(key)
def static_strings():
sd = {}
catalog = readonly_app_path("gettext")
for langcode in os.listdir(catalog):
sd[langcode] = gettext.translation(
"static", catalog, [langcode], DictionaryAccessProtocolImp
)
return sd
def find_astool_master_version(in_base):
with open(os.path.join(in_base, "astool_store.json"), "r") as jsf:
return json.load(jsf)["master_version"]
def create_dict_aggregator(master, language):
choices = {}
extra = os.environ.get("AS_EXTRA_DICTIONARIES")
if extra:
for tag in extra.split(";"):
rgn_tag, lang_code, name = tag.split(":")
region_root = os.path.join(os.environ.get("AS_DATA_ROOT", "."), rgn_tag)
base = os.path.join(region_root, "masters", find_astool_master_version(region_root))
logging.debug("Loading dictionary: %s", base)
choices[lang_code] = dict_aggregator.Alternative(
name, lang_code, libcard2.string_mgr.DictionaryAccess(base, lang_code)
)
fallback = libcard2.string_mgr.DictionaryAccess(master, language)
return dict_aggregator.DictionaryAggregator(fallback, choices)
def application(master, language, debug):
if os.environ.get("AS_TLINJECT_SECRET", ""):
print("TLInject is enabled for this server.")
locale.set_default_locale("en")
locale.load_gettext_translations(readonly_app_path("gettext"), "tornado")
strings = static_strings()
db_coordinator = database.DatabaseCoordinator()
application = Application(
dispatch.ROUTES,
db_coordinator=db_coordinator,
master=libcard2.master.MasterData(master),
string_access=create_dict_aggregator(master, language),
image_server=os.environ.get("AS_IMAGE_SERVER"),
tlinject_context=tlinject.TLInjectContext(db_coordinator),
news_context=news.NewsDatabase(db_coordinator),
card_tracking=card_tracking.CardTrackingDatabase(db_coordinator),
event_tracking=event_tracker.EventTrackingDatabase(db_coordinator),
template_path=readonly_app_path("webui"),
runtime_info=create_runtime_info(),
tlinject_secret=os.environ.get("AS_TLINJECT_SECRET", "").encode("utf8"),
ui_methods=pageutils.UI_METHODS,
static_path=readonly_app_path("static"),
static_strings=strings,
debug=debug,
autoreload=debug,
)
return application
| [
"[email protected]"
] | |
4593166a53e65ea1bf937735d8ae8662a9e1274d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /YjwJ6BfujKtmuTMqW_5.py | a15bfdf6d941cfbed39444ebf3d87ec932de004d | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py |
def dice_game(scores):
# this is a hot mess but stfu I'm learning
players = ["1", "2", "3", "4"]
n = len(players)
while n > 1:
this_round = scores[:n]
scores = scores[n:]
totals = [sum(dice) for dice in this_round]
lowest = min(totals)
if totals.count(lowest) == 1:
i = totals.index(lowest)
players.pop(i)
n -= 1
else:
indices = [i for i in range(n) if totals[i] == lowest]
firsts = [this_round[i][0] for i in range(n) if totals[i] == lowest]
lowest = min(firsts)
if firsts.count(lowest) == 1:
i = firsts.index(lowest)
players.pop(indices[i])
n -= 1
return "p{}".format(players[0])
| [
"[email protected]"
] | |
da7e83734ca405319febd520ce310db5e2bc9a97 | 0d8486c1d55c40bebea7c5428930f18165d2d0e9 | /tests/wasp1/AllAnswerSets/builtins_10a.test.py | a45344807ef9afdc35b06110f1c7cd3cacfebb7e | [
"Apache-2.0"
] | permissive | bernardocuteri/wasp | 6f81bf6aa8fb273c91bbf68ecce4ecb195a55953 | 05c8f961776dbdbf7afbf905ee00fc262eba51ad | refs/heads/master | 2021-06-08T11:58:25.080818 | 2020-10-05T16:57:37 | 2020-10-05T16:57:37 | 124,245,808 | 0 | 0 | Apache-2.0 | 2018-03-07T14:13:16 | 2018-03-07T14:13:16 | null | UTF-8 | Python | false | false | 136 | py | input = """
a("10.28399"). a("3.7").
b(X,Y) :- a(X), a(Y), X > Y.
"""
output = """
{a("10.28399"), a("3.7"), b("3.7","10.28399")}
"""
| [
"[email protected]"
] | |
91b0e7fd648b2b62f82c22a4e4128eb97fdb13e8 | 02255565aff9ea18a4d566955cc53ca06090efa4 | /Python 2000/objectcontent.py | 3f77b46a6c5185b2548503401f06c6c763a92224 | [] | no_license | BrainiacRawkib/Practical-Python-for-Begineers | 20a8a3697812bed78646c6af54a6dc195694109a | cb29ea1a38339fcf2fac005feb92b5a72ae98387 | refs/heads/master | 2020-12-01T09:10:06.802758 | 2019-12-28T15:27:40 | 2019-12-28T15:27:40 | 230,598,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 662 | py | class Foo():
zname = 'init.name'
def __enter__(self): # with block
self.zname = 'with.enter.name'
def __exit__(self, xtype, xval, trace):
self.zname = 'with.exit.name'
def name(self):
return self.zname
# Just an object
bla = Foo()
# Consistent block entry / exit values
print('pre:\t', bla.name()) # True
if True:
print('block:\t', bla.name()) # normal
# Activate __enter__ via 'with'
try:
with bla:
print('with:\t', bla.name()) # enter block
print('xblock:\t', bla.name()) # exit block
finally:
print('finally:', bla.name()) # exit block
print('post:\t', bla.name(), 'still!')
| [
"[email protected]"
] | |
a62f98d1e5a82883078bc383513b920a7276a548 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/Dementia RL/tdlib/noise.py | 85597a3e5f643493f71b15226ad3d4f3373b01b3 | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d7ee89dcbac0978f3e0be51be18c9d4df9e003f4ce5a0798db764e5d037d37d4
size 6811
| [
"[email protected]"
] | |
34b53ab2947ece36c60a2fa69b2e7dfd676e8e8e | 1843fd5ccb4377240e664acd21ba5a9369eca2ab | /bluebottle/bb_payouts/utils.py | 33a6ad75ba84ef9eae620ac7c8c84883262d376f | [
"BSD-2-Clause"
] | permissive | raux/bluebottle | ba2e576cebcb6835065004c410b22bd8a6b9ee29 | 49d92b5deb289c1539f99122abc20f845577b879 | refs/heads/master | 2020-03-27T03:20:11.465491 | 2018-08-23T13:09:25 | 2018-08-23T13:09:25 | 145,854,614 | 0 | 0 | null | 2018-08-23T13:05:00 | 2018-08-23T13:04:59 | null | UTF-8 | Python | false | false | 1,887 | py | import decimal
import datetime
from django.conf import settings
from django.utils import timezone
VAT_RATE = decimal.Decimal(settings.VAT_RATE)
def money_from_cents(amount):
"""
Convert monetary amount from cents into a Decimal working
with the MoneyField.
>>> money_from_cents(1000)
Decimal('10')
"""
# Make sure integer work too
amount = float(amount)
return decimal.Decimal(str(amount / 100))
def round_money(amount):
"""
Round monetary values specified as Decimal (2 decimals), used for
displaying results of calculations.
"""
assert isinstance(amount, decimal.Decimal)
return amount.quantize(decimal.Decimal('.01'),
rounding=decimal.ROUND_HALF_DOWN)
def calculate_vat(amount):
"""
Calculate VAT over exclusive amount according to default percentage.
>>> calculate_vat(decimal.Decimal('10'))
Decimal('2.10')
"""
return round_money(amount * VAT_RATE)
def calculate_vat_inclusive(amount):
"""
Calculate the inclusive amount for amounts excluding VAT.
>>> calculate_vat_inclusive(decimal.Decimal('10'))
Decimal('12.10')
"""
factor = VAT_RATE + decimal.Decimal('1.00')
return round_money(amount * factor)
def calculate_vat_exclusive(amount):
"""
Calculate the exclusive amont for amounts including VAT.
>>> calculate_vat_exclusive(decimal.Decimal('12.10'))
Decimal('10.00')
"""
factor = VAT_RATE + decimal.Decimal('1.00')
return round_money(amount / factor)
def date_timezone_aware(date):
"""
Create timezone aware datetime equivalent of date, corresponding
with midnight.
"""
midnight = datetime.time(0, 0)
default_zone = timezone.get_default_timezone()
dt = datetime.datetime.combine(date, midnight)
dt = timezone.make_aware(dt, default_zone)
return dt
| [
"[email protected]"
] | |
21c9807b5c8005f1aa58d06aab6da7c3cd3bacaa | 237cc38de0cf7a6e3661ed552ae771bd972d7438 | /base/virtualenv.py | 79ee98001072d09409b57a7f64b0ecf51161dd99 | [] | no_license | chydream/python | af5ad8a98c78de71e255f7b776f936c4b89c616e | e5bfef53a7770d4f323bd2877f93c8166c563695 | refs/heads/master | 2020-05-07T17:00:33.558178 | 2020-05-05T13:45:19 | 2020-05-05T13:45:19 | 180,708,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | # pip install virtualenv
# virtualenv django1.11
# cd Scripts
# 进入/退出虚拟环境:activate/deactivate
# pip install django==1.11
# pip install flask
# pip install virtualenvwrapper-win workon
# pip install pipenv
# mkdir py26
# cd py26
# pipenv --python 2.7
# pipenv shell
# exit()
# pipenv install requests
# pipenv --help/graph
| [
"[email protected]"
] | |
3b6d99b3f015286573099d0345cb67ee74b38848 | f2bec1dbb86b218fc1b7c9106ff13c15dea8c301 | /Connect Four/main.py | c98d476db4933cbef0032c43ab937f4b5db5e699 | [] | no_license | PuffyShoggoth/hatch | 59e0f3684f041846084316f5bfafda1601cf5d2e | e1b32787cb0571469cd06a469b24890e23b78a58 | refs/heads/master | 2021-01-01T16:52:57.758161 | 2017-07-28T18:47:47 | 2017-07-28T18:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,800 | py | from tkinter import *
from board import Board
from ai import Ai
from functools import partial
f = Frame()
f.pack()
a = Ai(4)
buttons = [[] for i in range(7)]
locked = False
emptyhash = 0
for i in range(7):
for j in range(6):
emptyhash^=a.hashvals[i][j][1]
b = Board([[] for i in range(7)], emptyhash)
def disp(message):
global locked, r
locked = True
r = Tk()
r.protocol("WM_DELETE_WINDOW", newgame)
Label(r, text=message).pack()
Button(r, text="Okay", command=newgame).pack()
def addpiece(column):
global buttons, b, a, locked
if len(b.board[column])==6 or locked:
#print(b.board)
return
buttons[column][len(b.board[column])].config(bg = 'red')
b.hash^=a.hashvals[column][len(b.board[column])][1]^a.hashvals[column][len(b.board[column])][2]
b.board[column].append(1)
if b.eval()== 9999998:
disp("Player wins")
return
a.bestmove = -1
a.negamax(b, 4, -9999999, 9999999, -1)
buttons[a.bestmove][len(b.board[a.bestmove])].config(bg = 'yellow')
b.hash^=a.hashvals[a.bestmove][len(b.board[a.bestmove])][0]^a.hashvals[a.bestmove][len(b.board[a.bestmove])][1]
b.board[a.bestmove].append(-1)
if b.eval()== -9999998:
disp("Computer wins")
return
elif sum(len(i) for i in b.board)==42:
disp("It's a tie")
return
def newgame():
global b, locked, r
locked = False
r.destroy()
for i in buttons:
for j in i:
j.config(bg="white")
b = Board([[] for i in range(7)], emptyhash)
#print(b.board)
for i in range(7):
for j in range(6):
butt = Button(f, width = 10, height = 5, bg="white", command = partial(addpiece, i))
butt.grid(row = 5-j, column = i)
buttons[i].append(butt)
mainloop()
| [
"[email protected]"
] | |
e08c8a6e0bd133a06c0f98760780129fa4d7c06f | c2ff2ee2b0c84e047a80cfdf0b0d0b122fc9db79 | /features/himario/mmediting/mmedit/models/backbones/encoder_decoders/gl_encoder_decoder.py | 0039f7eab264a5c2b7f8a6920725c9ef47ef7d1f | [
"Apache-2.0",
"MIT"
] | permissive | obarnard99/vilio | 275dcb62cdb8b2d8c55ab1e73f3a796bd2073a5b | 77aac226c3a0910410f11a5999f8908181f57ccd | refs/heads/master | 2023-06-29T17:02:02.282457 | 2021-06-22T09:50:11 | 2021-06-22T09:50:11 | 337,738,373 | 0 | 0 | MIT | 2021-06-22T09:50:12 | 2021-02-10T13:50:49 | Python | UTF-8 | Python | false | false | 2,220 | py | import torch.nn as nn
from mmcv.runner import auto_fp16, load_checkpoint
from mmedit.models.builder import build_component
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class GLEncoderDecoder(nn.Module):
"""Encoder-Decoder used in Global&Local model.
This implementation follows:
Globally and locally Consistent Image Completion
The architecture of the encoder-decoder is:\
(conv2d x 6) --> (dilated conv2d x 4) --> (conv2d or deconv2d x 7)
Args:
encoder (dict): Config dict to encoder.
decoder (dict): Config dict to build decoder.
dilation_neck (dict): Config dict to build dilation neck.
"""
def __init__(self,
encoder=dict(type='GLEncoder'),
decoder=dict(type='GLDecoder'),
dilation_neck=dict(type='GLDilationNeck')):
super(GLEncoderDecoder, self).__init__()
self.encoder = build_component(encoder)
self.decoder = build_component(decoder)
self.dilation_neck = build_component(dilation_neck)
# support fp16
self.fp16_enabled = False
@auto_fp16()
def forward(self, x):
"""Forward Function.
Args:
x (torch.Tensor): Input tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Output tensor with shape of (n, c, h', w').
"""
x = self.encoder(x)
if isinstance(x, dict):
x = x['out']
x = self.dilation_neck(x)
x = self.decoder(x)
return x
def init_weights(self, pretrained=None):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
# Here, we just use the default initialization in `ConvModule`.
pass
else:
raise TypeError('pretrained must be a str or None')
| [
"[email protected]"
] | |
97b0c1f5cf34e192fe99aaa9a7dd3a98c3c4aef8 | b9360389f3345c550250a1bd5a5fe58bd7410a71 | /wlct/migrations/0011_auto_20190424_1339.py | a2afd906d1ce15b09ffe3076dc44e15ad81ef9cc | [] | no_license | JustinR17/wzclot | d6f4bd8b46240abbeaa51837c1de1f3115554f17 | 4fa29666eb72aafee28bf57898fecc679fb3d4bb | refs/heads/master | 2023-02-07T22:43:13.827383 | 2021-12-30T22:34:39 | 2021-12-30T22:34:39 | 241,187,417 | 0 | 0 | null | 2020-02-17T19:14:01 | 2020-02-17T19:14:00 | null | UTF-8 | Python | false | false | 1,980 | py | # Generated by Django 2.1.4 on 2019-04-24 20:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wlct', '0010_auto_20190423_1427'),
]
operations = [
migrations.CreateModel(
name='RoundRobinTournament',
fields=[
('tournament_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wlct.Tournament')),
('type', models.CharField(default='Round Robin', max_length=255)),
('games_at_once', models.IntegerField(default=2)),
('first_place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='first_place', to='wlct.TournamentTeam')),
('second_place', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='second_place', to='wlct.TournamentTeam')),
],
bases=('wlct.tournament',),
),
migrations.AlterField(
model_name='groupstagetournamentgroup',
name='first_place',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='first_place_group', to='wlct.TournamentTeam'),
),
migrations.AlterField(
model_name='groupstagetournamentgroup',
name='second_place',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='second_place_group', to='wlct.TournamentTeam'),
),
migrations.AddField(
model_name='groupstagetournamentgroup',
name='round_robin_tournament',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='wlct.RoundRobinTournament'),
),
]
| [
"[email protected]"
] | |
273f87892d2804ebef8f3e0b4dbd80381774b995 | 470e0d4101bf9ca6d22a515e84e591ce37019ca2 | /back/view.py | b450d5005a7317a5a67084043e8f52db41e59b6d | [] | no_license | chenzh111/myblog | 9eedad7bf4601b8235f290f5592a4d19189954e6 | 545c47cb3dd0a0441029c0281c69ab561b369580 | refs/heads/master | 2020-05-17T02:52:17.908901 | 2019-04-25T16:00:03 | 2019-04-25T16:00:03 | 183,464,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,943 | py | from datetime import datetime
from flask import render_template, request, redirect, session
from werkzeug.security import generate_password_hash, check_password_hash
from flask import Blueprint
from back.model import Article, db, User, Articletype
from utils.functions import login_required
blue = Blueprint('app',__name__)
# 跳转到注册页面
@blue.route('/',methods=['GET'])
def aa():
return redirect('/register/')
# 注册
@blue.route('/register/',methods=["GET","POST"])
def register():
if request.method =="GET":
return render_template('back/register.html')
if request.method == "POST":
username = request.form.get("username")
password = request.form.get("password")
password2 = request.form.get("password2")
if username and password and password2:
user = User.query.filter(User.username == username).first()
if user:
error = "该账号已经被注册了"
return render_template('back/register.html',error = error)
else:
if password2 == password:
user = User()
user.username = username
user.password = generate_password_hash(password)
user.save()
return redirect('/login/')
else:
error = '您两次输入的密码不一样,注册失败'
return render_template('back/register.html',error = error)
else:
error = '请填写完整的信息进行注册'
return render_template('back/register.html',error = error)
# 登录
@blue.route('/login/',methods=["GET","POST"])
def login():
if request.method =="GET":
return render_template('back/login.html')
if request.method == "POST":
username = request.form.get("username")
password = request.form.get("password")
if username and password:
user = User.query.filter(User.username == username).first()
if not user:
error = '账号不存在,请注册后登陆'
return render_template('back/login.html',error=error)
if not check_password_hash(user.password,password):
error = '密码错误,请重新输入'
return render_template('back/login.html',error=error)
session['user_id'] = user.id
return redirect('/index/')
else:
error = '请输入完整信息'
return render_template('back/login.html',error=error)
# 后台主页
@blue.route('/index/',methods=['GET'])
@login_required
def index():
sum = Article.query.count()
return render_template('back/index.html', sum=sum)
@blue.route('/add-article/',methods=['GET'])
def add_article():
name = Articletype.query.order_by(Articletype.id).all()
time = datetime.now()
return render_template('back/add-article.html',time=time,name=name)
@blue.route('/add-category/',methods=['GET'])
def add_category():
return render_template('back/add-category.html')
@blue.route('/article/',methods=['GET'])
def article():
title = Article.query.all()
sum = Article.query.count()
return render_template('back/article.html',title = title, Articletype=Articletype, sum=sum )
@blue.route('/category/',methods=['GET'])
def category():
name = Articletype.query.order_by(Articletype.id).all()
sum = Articletype.query.count()
return render_template('back/category.html',name=name, sum=sum)
@blue.route('/update-article/',methods=['GET'])
def update_article():
name = request.args.to_dict().keys()
for name3 in name:
name2 = Article.query.filter_by(title=name3).first()
content = name2.content
desc = name2.desc
type = name2.type
id = name2.id
name1 = Articletype.query.order_by(Articletype.id).all()
Id = Articletype.query.filter(Articletype.id ==type).first().t_name
return render_template('back/update-article.html',name=name,name1=name1,content=content,desc=desc,type=type,id=id,Article=Article,Id=Id)
@blue.route('/update-category/',methods=['GET'])
def update_category():
name = request.args.to_dict().keys()
return render_template('back/update-category.html',name=name)
@blue.route('/Category/update/',methods=['GET', 'POST'])
def category_update():
name = request.args.to_dict().keys()
for x in name:
name1 = x
name = request.form.get('name')
name2 = Articletype.query.filter_by(t_name = name1).first()
name2.t_name = name
db.session.commit()
return redirect('/category/')
@blue.route('/Article/update/',methods=['GET', 'POST'])
def article_update():
titles = request.args.to_dict().keys()
for x in titles:
name1 = x
title = request.form.get('title')
content = request.form.get('content')
desc = request.form.get('describe')
type = request.form.get('category')
name2 = Article.query.filter_by(title = name1).first()
name2.title = title
name2.content = content
name2.desc = desc
name2.type = type
db.session.commit()
return redirect('/article/')
@blue.route('/delete-category/',methods=['GET','POST'])
def delete_category():
name = request.args.to_dict().keys()
for x in name:
name1 = x
name2 = Articletype.query.filter_by(t_name=name1).first()
db.session.delete(name2)
db.session.commit()
return redirect('/category/')
@blue.route('/delete-article/',methods=['GET','POST'])
def delete_article():
name = request.args.to_dict().keys()
for x in name:
# name1 = x
name2 = Article.query.filter_by(title=x).first()
db.session.delete(name2)
db.session.commit()
return redirect('/article/')
# 删除
@blue.route('/article/checkall/',methods=['GET', 'POST'])
def article_chenkall():
title = request.form.getlist('checkbox[]')
if title is not None:
for title in title:
name2 = Article.query.filter_by(title=title).first()
db.session.delete(name2)
db.session.commit()
else:
pass
return redirect('/article/')
# 创建数据库
@blue.route('/create/')
def create():
db.create_all()
return "xinjian"
@blue.route('/article/add/',methods=['GET','POST'])
def article_add():
category = request.form.get("category")
art = Article()
art.type = category
art.title = request.form.get('title')
art.content = request.form.get('content')
art.desc = request.form.get("describe")
if art.title and art.content and art.desc:
art.save()
else:
return render_template('back/add-article.html')
return redirect('/article/')
@blue.route('/category/add/',methods=['GET','POST'])
def category_add():
type = Articletype()
type.t_name = request.form.get('name')
type.save()
return redirect('/category/')
| [
"[email protected]"
] | |
4549467689c57968bf3e11bc9d9c24c02bf3047d | c24fa89450cccb48fcd481c3cfa475ee0e412e09 | /DatabaseManagementCode/configFileUploaderClass.py | 20195956f745442825dfcd1cbb03071220155465 | [] | no_license | PhoenixYanrongLi/CareEcoSystem_ServerCodeNew | e95d1c552cdcc70aac09482dfda63e253e01fcb0 | b627484694863c425483a04391eedc2ec2ec1098 | refs/heads/master | 2021-01-01T04:34:51.858543 | 2016-04-14T17:57:30 | 2016-04-14T17:57:30 | 56,258,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,048 | py | __author__ = 'Brad, Julien'
import csv
import json
import pickle
import traceback
from dataUploadClass import GenericFileUploader
from dataFileUploaderClass import DataFileUploader
from enum import Enum
from HttpServer.GCMHttpServer import GCMPush
from InHomeMonitoringCode.training_room_estimator import TrainingRoomEstimator
from DatabaseManagementCode.databaseWrapper import DatabaseWrapper
from DatabaseManagementCode.databaseWrapper import Helper
ConfigType = Enum(
'GT' , # Ground trust data
'PROFILE', # Patient profile data
'REGID' , # Registration id change
'CPU' , # Caregiver profile update
'PMU' , # Caregiver patient monitoring update
)
class ConfigFileUploader(GenericFileUploader):
__ERROR_THRESH = 0.2
def __init__(self, database, top_file_path):
super(ConfigFileUploader, self).__init__(database, top_file_path)
self.data_extractors = {
ConfigType.GT : self.extract_ground_trust_data,
ConfigType.PROFILE : self.extract_json_data,
ConfigType.REGID : self.extract_json_data,
ConfigType.CPU : self.extract_json_data,
ConfigType.PMU : self.extract_json_data,
}
self.config_processors = {
ConfigType.GT : self.process_ground_trust_data,
ConfigType.PROFILE : self.process_profile_data,
ConfigType.REGID : self.process_registration_data,
ConfigType.CPU : self.process_caregiver_profile_update,
ConfigType.PMU : self.process_caregiver_patient_monitoring_update,
}
def extract_file_data(self, file):
try:
config_type = getattr(ConfigType, self.data_type_name)
except AttributeError:
print('Unsupported config file type: ' + self.data_type_name)
return None
return self.data_extractors[config_type](file)
def process_data(self, file_data, patient_id, data_stream, data_type, time_stamp):
# Check that data are valid
if file_data is None:
print 'Invalid data provider!'
return False
# Get the right processor and execute it
try:
config_type = getattr(ConfigType, self.data_type_name)
except AttributeError:
print('Unsupported config file type: ' + self.data_type_name)
return None
return self.config_processors[config_type](file_data, patient_id, data_stream, data_type)
@staticmethod
def extract_ground_trust_data(file):
# Extract ground trust data as regular data
reader = csv.reader(file, delimiter=" ")
header, reader = DataFileUploader.extract_header_info(reader)
file_data, reader = DataFileUploader.get_file_data(reader, header, [2, 3])
return [header, file_data]
@staticmethod
def extract_json_data(file):
try:
return json.load(file)
except ValueError: # Not a JSON Object
return None
def process_ground_trust_data(self, file_data, patient_id, data_stream, data_type):
# Process ground trust data as regular data
header = file_data[0]
data = file_data[1]
database_name = '_' + patient_id
table_name = self.uploader_type + self.stream_name + self.data_type_name
column_names, column_types = DataFileUploader.extract_var_types_headers(header)
# Make sure the table exists
if not self.create_table(database_name = database_name,
table_name = table_name,
column_names = column_names,
column_types = column_types):
return False
# Insert the data into the database
return self.insert_into_database(database_name = database_name,
table_name = table_name,
column_names = column_names,
values = data)
def parse_and_store_rooms_info(self, patient_id, tallest_ceiling_height, rooms_data):
"""
Read the given dictionary containing the room data and store those values into the database.
Prepare the rooms for the classifier
:return: success (if the operation is successful), room_ids, beaconcoors: room_ids contains a list of the
mote ids, beaconcoors is a dictionary containing the room names and the corresponding mote location.
"""
column_names = ['ROOM_NAME', 'MOTE_ID', 'FLOOR', 'CEILING_HEIGHT', 'X_DIST_FROM_PREV', 'Y_DIST_FROM_PREV', 'ROOM_IDX']
database_name = '_' + patient_id
table_name = 'rooms'
beaconcoors = {}
room_ids = ''
(x, y) = (0, 0)
# Backup an eventual old table
need_new_table = True
if self.table_exists(database_name, table_name):
# If the values are the same, no need to update the table
if not self.fetch_from_database(database_name = database_name,
table_name = table_name,
to_fetch = '*',
to_fetch_modifiers = 'count'):
return False, None, None, None
if self.fetchone()[0] == len(rooms_data): # Same number of rooms
need_new_table = False
for room_data in rooms_data:
if not self.fetch_from_database(database_name = database_name,
table_name = table_name,
to_fetch = '*',
to_fetch_modifiers = 'count',
where = [[key, room_data[key]] for key in column_names]):
return False, None, None, None
if self.fetchone()[0] == 0: # One of the rooms is different
need_new_table = True
break
if need_new_table:
backup_name = Helper.format_backup_table_name(table_name)
self.rename_table(database_name, table_name, backup_name)
# Save rooms
if need_new_table:
if not self.create_table(
database_name = database_name,
table_name = table_name,
column_names = column_names,
column_types = ['VARCHAR(100)', 'VARCHAR(100) PRIMARY KEY', 'INT', 'FLOAT', 'FLOAT', 'FLOAT', 'INT']
):
return False, None, None
for room_data in rooms_data:
if not self.insert_into_database(database_name = database_name,
table_name = table_name,
column_names = column_names,
values = [room_data[key] for key in column_names]):
return False, None, None
# Extract room's data
for room_data in rooms_data:
data = [room_data[key] for key in column_names]
x += data[4]
y += data[5]
z = data[2] * tallest_ceiling_height + data[3]
beaconcoors[data[0]] = (x, y, z)
room_ids += data[1] + ','
return True, room_ids[0: -1], beaconcoors
def parse_and_store_patient_info(self, patient_id, patient_data, reg_id):
"""
Read the given dictionary containing the patient data and those information for the training
:return: success (if the operation is successful), beaconcoors, start_timestamp, end_timestamp:
See parse_and_store_rooms_info() for beaconcoors, start_timestamp and end_timestamp indicate
the period of data to use for training.
"""
column_names = ['USERNAME', 'TALLEST_CEILING', 'HOME_LATITUDE', 'HOME_LONGITUDE', 'START', 'END', 'VALID',
'clf', 'trainer']
database_name = '_' + patient_id
table_name = 'profile'
data = [patient_data[key] for key in column_names[0: -3]]
data[4] = self.timestamp_to_UTC(data[4])
data[5] = self.timestamp_to_UTC(data[5])
data.append(0)
# Process rooms
success, room_ids, beaconcoors = self.parse_and_store_rooms_info(patient_id, data[1], patient_data['ROOMS'])
if not success:
return False, None, None, None
# Save the registration id
if not self.save_sender_registration_id(patient_id, reg_id):
return False, None, None, None
# Backup an eventual old table
need_new_table = True
if self.table_exists(database_name, table_name):
# If the values are the same, no need to update the table
if not self.fetch_from_database(database_name = database_name,
table_name = table_name,
to_fetch = column_names[0:-3]):
return False, None, None, None
need_new_table = False
for v1, v2 in zip(self.fetchone(), data[0:-1]): # For some reason a conditional fetch doesn't work...
if isinstance(v1, basestring) or isinstance(v2, basestring):
v1 = str(v1)
v2 = str(v2)
if v1 != v2:
need_new_table = True
backup_name = Helper.format_backup_table_name(table_name)
self.rename_table(database_name, table_name, backup_name)
break
# Create a new profile table
if need_new_table:
if not self.create_table(
database_name = database_name,
table_name = table_name,
column_names = column_names,
column_types = ['VARCHAR(100)', 'FLOAT', 'FLOAT', 'FLOAT', 'VARCHAR(100)', 'VARCHAR(100)',
'BOOLEAN DEFAULT FALSE', 'LONGBLOB', 'LONGBLOB']
):
return False, None, None, None
# Save the profile
if not self.insert_into_database(database_name = database_name,
table_name = table_name,
column_names = column_names[0: -2],
values = data):
return False, None, None, None
return True, beaconcoors, data[4], data[5]
def train_house_monitoring(self, patient_id, beaconcoors, start_timestamp, end_timestamp):
"""
Train the classifier.
:return: Returns if the operation is successful and the classifier error. Above 10%, it's a failure
"""
# Retrieve the ground trust entries
database_name = '_' + patient_id
if not self.fetch_from_database(database_name = database_name,
table_name = 'configMMGT',
where = [['type' , 'CR'],
['start', '>=', start_timestamp],
['end' , '<=', end_timestamp]],
order_by = ['start', 'ASC']):
return False, 0
traingtlist = [[row[1], row[2], row[3]] for row in self] # [[label, start, end], ...]
# Retrieve the rssi entries
if not self.fetch_from_database(database_name = database_name,
table_name = 'dataHMRSSI',
where = [['timestamp', '>=', start_timestamp],
['timestamp', '<=', end_timestamp]],
order_by = ['timestamp', 'ASC']):
return False, 0
trainrssilist = [[row[i] for i in range(0, 2 + 2 * row[1])] for row in self]
# Train the classifier
trainer = TrainingRoomEstimator(beaconcoors)
clf, sumdict = trainer.train_classifier(trainrssilist, traingtlist)
error = sumdict["classifier error"]
# Store the classifier and the trainer
clf = pickle.dumps(clf)
trainer = pickle.dumps(trainer)
if error <= ConfigFileUploader.__ERROR_THRESH:
valid = 1
else:
valid = 0
if not self.update_database(database_name = database_name,
table_name = 'profile',
to_update = [['clf', clf], ['trainer', trainer], ['VALID', valid]]):
return False, 0
return True, error
@staticmethod
def notify_user(is_valid, reg_id, error):
""" Notify the user of the success or not of the training. """
content = { 'type': 'PPV' }
if is_valid:
content['status'] = 'valid'
else:
content['status'] = 'invalid'
content['extras'] = error
gcm = GCMPush(content, reg_id)
gcm.start()
def process_profile_data(self, file_data, patient_id, data_stream, data_type):
""" Process the given patient's profile. Stores it, trains a classifier and notify the user of the result. """
# Process the patient's information
reg_id = file_data['reg_id']
success, beaconcoors, start, end = self.parse_and_store_patient_info(patient_id, file_data, reg_id)
if not success:
self.notify_user(False, reg_id, 'Failed to parse and save patient information!')
return True
# Train the classifier and notify the user
try:
print '***** Training starts *****'
success, error = self.train_house_monitoring(patient_id, beaconcoors, start, end)
if not success:
self.notify_user(False, reg_id, 'Failed to train room classifier!')
else:
self.notify_user(error <= ConfigFileUploader.__ERROR_THRESH, reg_id,
'Failed to properly train the room classifier (The error %f is too high).\n'
'Please verify your setup and try again.' % error)
print '***** Training ends (Error: %f) *****' % error
except Exception as e:
print '***** Training failed *****'
traceback.print_exc(e)
error = ''
for err in e.args:
error += str(err) + '\n'
self.notify_user(False, reg_id, error[0: -1])
return True
def process_registration_data(self, file_data, user_id, data_stream, data_type):
""" Update the given registration id. """
if 'old_id' in file_data:
return self.update_registration_id(file_data['old_id'], file_data['new_id'])
return self.update_registration_id(None, file_data['new_id'])
def process_caregiver_profile_update(self, file_data, caregiver_id, data_stream, data_type):
""" Update the caregiver profile. """
column_names = ['username', 'password', 'email']
return self.update_database(database_name = 'config',
table_name = 'caregiverProfiles',
to_update = [[name, file_data[name]] for name in column_names],
where = ['caregiver', caregiver_id])
def process_caregiver_patient_monitoring_update(self, file_data, caregiver_id, data_stream, data_type):
""" Update the caregiver profile. """
monitored = False
if file_data['monitored'] == 'true':
monitored = True
return self.update_database(database_name = 'config',
table_name = 'caregiverPatientPairs',
to_update = ['monitored', monitored],
where = [['caregiver', caregiver_id], ['patient', file_data['patient']]])
| [
"[email protected]"
] | |
5890e87f6173b5cb9740d08aa216544fe73865bb | dffe32dc7f1819217168c42234cc148b142ebe10 | /scripts/download.py | b489821a7590b1f3f7db8fa3e35364f47776e3f5 | [] | no_license | willgdjones/HistoVAE | d4070b991877fb0be83b42b0c110ece6c47563e1 | 5956447b703b5d06115e54843df8c9528a7c1943 | refs/heads/master | 2022-12-12T04:28:06.829139 | 2018-10-18T09:52:25 | 2018-10-18T09:52:25 | 114,638,657 | 10 | 2 | null | 2022-12-08T02:22:28 | 2017-12-18T12:30:02 | Python | UTF-8 | Python | false | false | 903 | py | import sys
import requests.packages.urllib3
import click
import os
import logging
requests.packages.urllib3.disable_warnings()
sys.path.append('.')
from src.classes import Dataset
logger = logging.getLogger(__name__)
@click.command()
@click.option(
'--n_images', default=10,
help="Number of images per tissue"
)
@click.option(
'--n_tissues', default=6,
help="Number of tissues with most numbers of samples"
)
def main(n_images, n_tissues):
os.makedirs('data/images', exist_ok=True)
logger.info('Initializing download script')
dataset = Dataset(n_images=n_images, n_tissues=n_tissues)
dataset.download()
if __name__ == '__main__':
logging.basicConfig(
filename='logs/download.log',
level=logging.DEBUG,
format=(
"%(asctime)s | %(name)s | %(processName)s | "
"%(levelname)s: %(message)s"
)
)
main()
| [
"[email protected]"
] | |
0b096b3ed4acff128d708bd97fa00312a842e694 | 9a5438bdb8e84d0167ddea5458a7f729fdd54121 | /MetaDataApi/tests/test_utils/test_buildDjangoSearchArgs.py | c210a9521f6cdff2456689f3ca027663c69f3df6 | [] | no_license | Grusinator/MetaDataApi | 740fd2be4cb97b670f827a071a0ac8c50f79f8ff | 081f881c735466ed1dbbd68646b821299c5168f8 | refs/heads/master | 2023-07-25T23:58:22.179717 | 2020-03-15T09:36:05 | 2020-03-15T09:36:05 | 149,087,967 | 5 | 1 | null | 2023-07-25T15:39:12 | 2018-09-17T07:45:09 | CSS | UTF-8 | Python | false | false | 1,302 | py | import django
from django.test import TransactionTestCase
from MetaDataApi.utils.django_utils import BuildDjangoSearchArgs
class test_buildDjangoSearchArgs(TransactionTestCase):
# Django requires an explicit setup() when running tests in PTVS
@classmethod
def setUpClass(cls):
super(test_buildDjangoSearchArgs, cls).setUpClass()
django.setup()
def test_build_search_args_from_json(self):
# data = UtilsForTesting.loadStravaActivities()
data = {
"object1": {
"Attribute1": 3,
"Attribute2": {"value": "att2value"},
"object2": {
"attribute3": True,
"attribute4": 5.04
}
}
}
builder = BuildDjangoSearchArgs()
args = builder.build_from_json(data)
expected = {
'from_edge__from_object__label': 'object1',
'from_edge__from_object__from_edge__from_object__label__in':
['Attribute1',
'Attribute2',
'object2'],
'from_edge__from_object__from_edge__from_object__from_edge__from_object__label__in':
['attribute3',
'attribute4']
}
self.assertEqual(args, expected)
| [
"[email protected]"
] | |
8eb431bf2b7b5607bbc88c85c0e16d7c725be8ef | 5ff8cefa68d52d2427bb3d35320cd8bd0d072968 | /Tuples/Ten_most_repititive_words_from_file.py | b6669b1c49adcea563f57b5a8309203017b378e9 | [] | no_license | gsudarshan1990/PythonSampleProjects | a65a111454f8dc551f1cd29901cead0798ad6dc3 | 3c1a5174c5f966b0eed2828221add76ec0d019d5 | refs/heads/master | 2020-05-09T16:02:37.743568 | 2019-07-14T06:22:55 | 2019-07-14T06:22:55 | 181,255,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 721 | py | """
Find the 10 most repititive words
"""
import string
filename=input('Enter the file name')
try:
filehandle=open(filename)
except IOError as argument:
print('Could not open the file')
print(argument)
exit()
word_dictionary=dict()
for line in filehandle:
line=line.translate(str.maketrans('','',string.punctuation))
line=line.rstrip()
words=line.split()
for word in words:
word_dictionary[word]=word_dictionary.get(word,0)+1
print(word_dictionary)
list_value_key=list()
for key,value in word_dictionary.items():
list_value_key.append((value,key))
list_value_key_sorted=sorted(list_value_key,reverse=True)
for key,value in list_value_key_sorted[:10]:
print(key,value)
| [
"[email protected]"
] | |
a1df8d2c838e3047109aa681da24dea42a0b925a | ee27325f6a3e6a2d1f5e004aa60f5974ad864ae9 | /contrib/python/plotly/py3/plotly/graph_objs/isosurface/_colorbar.py | 63ce7ada86703e454601be2b3ead12cef9289d20 | [
"Apache-2.0",
"MIT"
] | permissive | alvinahmadov/catboost | f32d2b16be9db7439e429c88feb5676de842fc89 | a6e0caa4779b31199f535cf43b09879d7c653abe | refs/heads/master | 2023-06-12T19:29:52.028508 | 2023-05-11T18:33:03 | 2023-05-11T18:33:03 | 202,584,937 | 0 | 0 | Apache-2.0 | 2019-08-15T17:35:23 | 2019-08-15T17:35:23 | null | UTF-8 | Python | false | false | 79,982 | py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "isosurface"
_path_str = "isosurface.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"labelalias",
"len",
"lenmode",
"minexponent",
"nticks",
"orientation",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklabeloverflow",
"ticklabelposition",
"ticklabelstep",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# labelalias
# ----------
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. labelalias
can be used with any axis type, and both keys (if needed) and
values (if desired) can include html-like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# minexponent
# -----------
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# orientation
# -----------
@property
def orientation(self):
"""
Sets the orientation of the colorbar.
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['h', 'v']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.isosurface.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.isosurface.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.isosurface.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.isosurface.col
orbar.tickformatstopdefaults), sets the default property values
to use for elements of isosurface.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.isosurface.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklabeloverflow
# -----------------
@property
def ticklabeloverflow(self):
"""
Determines how we handle tick labels that would overflow either
the graph div or the domain of the axis. The default value for
inside tick labels is *hide past domain*. In other cases the
default is *hide past div*.
The 'ticklabeloverflow' property is an enumeration that may be specified as:
- One of the following enumeration values:
['allow', 'hide past div', 'hide past domain']
Returns
-------
Any
"""
return self["ticklabeloverflow"]
@ticklabeloverflow.setter
def ticklabeloverflow(self, val):
self["ticklabeloverflow"] = val
# ticklabelposition
# -----------------
@property
def ticklabelposition(self):
"""
Determines where tick labels are drawn relative to the ticks.
Left and right options are used when `orientation` is "h", top
and bottom when `orientation` is "v".
The 'ticklabelposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', 'outside top', 'inside top',
'outside left', 'inside left', 'outside right', 'inside
right', 'outside bottom', 'inside bottom']
Returns
-------
Any
"""
return self["ticklabelposition"]
@ticklabelposition.setter
def ticklabelposition(self, val):
self["ticklabelposition"] = val
# ticklabelstep
# -------------
@property
def ticklabelstep(self):
"""
Sets the spacing between tick labels as compared to the spacing
between ticks. A value of 1 (default) means each tick gets a
label. A value of 2 means shows every 2nd label. A larger value
n means only every nth tick is labeled. `tick0` determines
which labels are shown. Not implemented for axes with `type`
"log" or "multicategory", or when `tickmode` is "array".
The 'ticklabelstep' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["ticklabelstep"]
@ticklabelstep.setter
def ticklabelstep(self, val):
self["ticklabelstep"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Defaults to
"top" when `orientation` if "v" and defaults
to "right" when `orientation` if "h". Note that
the title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.isosurface.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use isosurface.colorbar.title.font instead.
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.isosurface.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use isosurface.colorbar.title.side instead.
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h". Note that the
title's location used to be set by the now deprecated
`titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
Defaults to 1.02 when `orientation` is "v" and 0.5 when
`orientation` is "h".
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar. Defaults to "left" when `orientation` is "v" and
"center" when `orientation` is "h".
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
Defaults to 0.5 when `orientation` is "v" and 1.02 when
`orientation` is "h".
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar. Defaults to "middle" when `orientation` is "v"
and "bottom" when `orientation` is "h".
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. labelalias can be used with
any axis type, and both keys (if needed) and values (if
desired) can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.isosurface.colo
rbar.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.isosur
face.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
isosurface.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.isosurface.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use isosurface.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use isosurface.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h". Note that the title's location
used to be set by the now deprecated `titleside`
attribute.
x
Sets the x position of the color bar (in plot
fraction). Defaults to 1.02 when `orientation` is "v"
and 0.5 when `orientation` is "h".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction). Defaults to 0.5 when `orientation` is "v"
and 1.02 when `orientation` is "h".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
labelalias=None,
len=None,
lenmode=None,
minexponent=None,
nticks=None,
orientation=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklabeloverflow=None,
ticklabelposition=None,
ticklabelstep=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs,
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.isosurface.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. labelalias can be used with
any axis type, and both keys (if needed) and values (if
desired) can include html-like tags or MathJax.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
orientation
Sets the orientation of the colorbar.
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.isosurface.colo
rbar.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.isosur
face.colorbar.tickformatstopdefaults), sets the default
property values to use for elements of
isosurface.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of the
axis. The default value for inside tick labels is *hide
past domain*. In other cases the default is *hide past
div*.
ticklabelposition
Determines where tick labels are drawn relative to the
ticks. Left and right options are used when
`orientation` is "h", top and bottom when `orientation`
is "v".
ticklabelstep
Sets the spacing between tick labels as compared to the
spacing between ticks. A value of 1 (default) means
each tick gets a label. A value of 2 means shows every
2nd label. A larger value n means only every nth tick
is labeled. `tick0` determines which labels are shown.
Not implemented for axes with `type` "log" or
"multicategory", or when `tickmode` is "array".
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.isosurface.colorbar.Title`
instance or dict with compatible properties
titlefont
Deprecated: Please use isosurface.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use isosurface.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h". Note that the title's location
used to be set by the now deprecated `titleside`
attribute.
x
Sets the x position of the color bar (in plot
fraction). Defaults to 1.02 when `orientation` is "v"
and 0.5 when `orientation` is "h".
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar. Defaults to "left" when
`orientation` is "v" and "center" when `orientation` is
"h".
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction). Defaults to 0.5 when `orientation` is "v"
and 1.02 when `orientation` is "h".
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar. Defaults to "middle" when
`orientation` is "v" and "bottom" when `orientation` is
"h".
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.isosurface.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("labelalias", None)
_v = labelalias if labelalias is not None else _v
if _v is not None:
self["labelalias"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("minexponent", None)
_v = minexponent if minexponent is not None else _v
if _v is not None:
self["minexponent"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("orientation", None)
_v = orientation if orientation is not None else _v
if _v is not None:
self["orientation"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklabeloverflow", None)
_v = ticklabeloverflow if ticklabeloverflow is not None else _v
if _v is not None:
self["ticklabeloverflow"] = _v
_v = arg.pop("ticklabelposition", None)
_v = ticklabelposition if ticklabelposition is not None else _v
if _v is not None:
self["ticklabelposition"] = _v
_v = arg.pop("ticklabelstep", None)
_v = ticklabelstep if ticklabelstep is not None else _v
if _v is not None:
self["ticklabelstep"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
| [
"[email protected]"
] | |
e349259897bdd4b47f9069b2a79a212c9fb95105 | 0cf7dd2c3c0b28b52f1273e8fe2ea0a87cacc6af | /Other_Algorithms/Page-Rank_algorithm.py | 257f973d0829fcb8ce938cae730675e41299a2ce | [] | no_license | EngrDevDom/Everyday-Coding-in-Python | 61b0e4fcbc6c7f399587deab2fa55763c9d519b5 | 93329ad485a25e7c6afa81d7229147044344736c | refs/heads/master | 2023-02-25T05:04:50.051111 | 2021-01-30T02:43:40 | 2021-01-30T02:43:40 | 274,971,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | # Page Rank Algorithm
"""
The Page Rank algorithm is applicable in web pages. Web page is a directed
graph, we know that the two components of Directed graphs are -nodes and
connections. The pages are nodes and hyperlinks are the connections, the
connection between two nodes.
We can find out the importance of each page by the Page Rank and it is accurate.
The value of Page Rank is the probability will be between 0 and 1.
The Page Rank value of individual node in a graph depends on the Page Rank value
of all nodes which connect to it and those nodes are cyclically connected to
the nodes whose ranking we want, we use converging iterative method for assigning
values to Page Rank.
"""
import numpy as np
import scipy as sc
import pandas as pd
from fractions import Fraction
def display_format(my_vector, my_decimal):
return np.round((my_vector).astype(np.float), decimals=my_decimal)
my_dp = Fraction(1,3)
Mat = np.matrix([[0,0,1], [Fraction(1,2),0,0], [Fraction(1,2),1,0]])
Ex = np.zeros((3,3))
Ex[:] = my_dp
beta = 0.7
Al = beta * Mat + ((1-beta) * Ex)
r = np.matrix([my_dp, my_dp, my_dp])
r = np.transpose(r)
previous_r = r
for i in range(1,100):
r = Al * r
print(display_format(r,3))
if (previous_r==r).all():
break
previous_r = r
print("Final: \n", display_format(r,3))
print("Sum: ", np.sum(r))
| [
"[email protected]"
] | |
036c90c27ad291c077b467be837247756e9b287d | 1f08436bab6cd03bcfb257e8e49405cbc265195a | /1_Basic_I/Basic/40.py | 229ea4394a7ac7e7ccb627f27a4c9b5fd1e54eec | [] | no_license | kuchunbk/PythonBasic | e3ba6322f256d577e37deff09c814c3a374b93b2 | a87135d7a98be8830d30acd750d84bcbf777280b | refs/heads/master | 2020-03-10T04:28:42.947308 | 2018-04-17T04:25:51 | 2018-04-17T04:25:51 | 129,192,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | import math
def calculate_distance(input_xa, input_ya, input_xb, input_yb):
distance = math.sqrt((input_xa - input_xb) ** 2
+ (input_ya - input_yb) ** 2)
return distance
if __name__ == "__main__":
input_xa = float(input('xa'))
input_ya = float(input('ya'))
input_xb = float(input('xb'))
input_yb = float(input('yb'))
print(calculate_distance(input_xa, input_ya, input_xb, input_yb))
| [
"[email protected]"
] | |
b2e829b6d04503acc4ef29e292c5c36d9b955d55 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinci_v41r2/Phys/StrippingSelections/python/StrippingSelections/StrippingBandQ/StrippingCharmAssociative.py | 5212680192e3d88cffaf34be5cc48b90d4b6cd11 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,326 | py | #!/usr/bin/env python
# =============================================================================
# $Id: StrippingCharmAssociative.py 176885 2014-08-26 14:00:22Z ibelyaev $
# =============================================================================
## @file
#
# The attempt for stripping of associative ``onium'' production
#
# - dimuon + dimuon
# - dimuon + high-pt gamma
#
# Parasitic:
#
# - dimuon + ( dimuon + gamma ) [ mimic Chi_(b,c) + dimuon ]
# - 2x ( dimuon + gamma ) [ mimic 2xChi_(b,c) ]
#
# Accociative W+ production:
# - dimuon & W+
# - ( dimuon + gamma ) & W+ [ mimic Chi_(b,c)]
#
#
# +-------------------------------------+--------+------+-------+
# | StrippingReport INFO Event 276300, Good event 276300 |
# +-------------------------------------+--------+------+-------+
# | Decision name | Rate,% | Acc. | ms/evt|
# +-------------------------------------+--------+------+-------+
# | double charm |
# +-------------------------------------+--------+------+-------+
# | <>DiMuonAndGammaForCharmAssociative | 0.0467 | 129 | 0.668 |
# | <>DoubleDiMuonForCharmAssociative | 0.0405 | 112 | 0.037 |
# | <>ChiAndDiMuonForCharmAssociative | 0.0405 | 112 | 0.037 |
# | <>DiChiForCharmAssociative | 0.0405 | 112 | 0.037 |
# +-------------------------------------+--------+------+-------+
# | charm & W+ |
# +-------------------------------------+--------+------+-------+
# | <>DiMuonAndWForCharmAssociative | 0.0076 | 21 | 0.060 |
# | <>ChiAndWForCharmAssociative | 0.0054 | 15 | 0.033 |
# +-------------------------------------+--------+------+-------+
#
# @author Vanya BELYAEV [email protected]
# @date 2011-05-26
#
# $Revision: 176885 $
# Last modification $Date: 2013-01-05 21:21:10 +0100
# by $Author: ibelyaev $
# =============================================================================
"""The attempt for stripping of associative ``onium'' production
- dimuon + dimuon
- dimuon + high-pt gamma
Parasitic:
- dimuon + ( dimuon + gamma ) [ mimic Chi_(b,c) + dimuon ]
- 2x ( dimuon + gamma ) [ mimic 2xChi_(b,c) ]
Accociative W+ production:
- dimuon & W+
- ( dimuon + gamma ) & W+ [ mimic Chi_(b,c)]
+-------------------------------------+--------+------+-------+
| StrippingReport INFO Event 276300, Good event 276300 |
+-------------------------------------+--------+------+-------+
| Decision name | Rate,% | Acc. | ms/evt|
+-------------------------------------+--------+------+-------+
| double charm |
+-------------------------------------+--------+------+-------+
| <>DiMuonAndGammaForCharmAssociative | 0.0467 | 129 | 0.668 |
| <>DoubleDiMuonForCharmAssociative | 0.0405 | 112 | 0.037 |
| <>ChiAndDiMuonForCharmAssociative | 0.0405 | 112 | 0.037 |
| <>DiChiForCharmAssociative | 0.0405 | 112 | 0.037 |
+-------------------------------------+--------+------+-------+
| charm & W+ |
+-------------------------------------+--------+------+-------+
| <>DiMuonAndWForCharmAssociative | 0.0076 | 21 | 0.060 |
| <>ChiAndWForCharmAssociative | 0.0054 | 15 | 0.033 |
+-------------------------------------+--------+------+-------+
"""
# =============================================================================
__author__ = 'Vanya BELYAEV [email protected]'
__date__ = '2011-05-26'
__version__ = '$Revision: 176885 $'
# =============================================================================
__all__ = (
'StrippingCharmAssociativeConf',
'default_config'
)
# =============================================================================
from Gaudi.Configuration import *
from GaudiKernel.SystemOfUnits import GeV, MeV, mm
from GaudiConfUtils.ConfigurableGenerators import FilterDesktop, CombineParticles
from PhysSelPython.Wrappers import Selection
#
# Attention: we need prompt onia, thus "All" Loose muons here
from StandardParticles import ( StdAllLooseMuons , ## PROMPT muons!
StdLooseAllPhotons )
#
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
# =============================================================================
## logging
# =============================================================================
import logging
logger = logging.getLogger(__name__)
if not logger.handlers :
logging.basicConfig()
logger.setLevel(logging.INFO)
# =============================================================================
## Define the default configuration
_default_configuration_ = {
#
## Selection of basic particles
#
'PhotonCuts' : ' PT > 3.0 * GeV ' ,
'MuonCuts' : ' ISMUON & ( PT > 650 * MeV ) & ( TRCHI2DOF < 5 ) ' ,
#
## photons from chi_(c,b)
#
'GammaChi' : ' ( PT > 400 * MeV ) & ( CL > 0.05 ) ' ,
#
## W+- selection
#
'WCuts' : " ( 'mu+'== ABSID ) & ( PT > 15 * GeV )" ,
#
## Global Event cuts
#
'CheckPV' : True ,
#
## Technicalities:
#
'Preambulo' : [
#
## shortcut for chi2 of vertex fit
'chi2vx = VFASPF(VCHI2) ' ,
#
## shortcut for the c*tau
"from GaudiKernel.PhysicalConstants import c_light" ,
#
## dimuons:
"psi = ADAMASS ( 'J/psi(1S)' ) < 125 * MeV" ,
"psi_prime = ADAMASS ( 'psi(2S)' ) < 125 * MeV" ,
"mu2_tight = ( chi2vx < 10 ) & ( MINTREE ( 'mu+' == ABSID , PT ) > 900 * MeV ) " ,
"dimu_tight = ( PT > 3.0 * GeV ) & mu2_tight " ,
"psi_tight = ( ADMASS ( 'J/psi(1S)' ) < 110 * MeV ) & dimu_tight " ,
"psi_prime_tight = ( ADMASS ( 'psi(2S)' ) < 110 * MeV ) & dimu_tight " ,
"dimuon_heavy = ( M > 4.9 * GeV ) & dimu_tight " ,
"dimuon_tight = psi_tight | psi_prime_tight | dimuon_heavy " ,
] ,
#
## monitoring ?
'Monitor' : False ,
#
## pescales
'DiMuonAndGammaPrescale' : 1.0 ,
'DoubleDiMuonPrescale' : 1.0 ,
'ChiAndDiMuonPrescale' : 1.0 ,
'DiChiPrescale' : 1.0 ,
'DiMuonAndWPrescale' : 1.0 ,
'ChiAndWPrescale' : 1.0
# =========================================================================
}
# =============================================================================
## the mandatory element for stripping framework
default_config = {
#
'NAME' : 'CharmAssociative' ,
'WGs' : [ 'BandQ' ] ,
'CONFIG' : _default_configuration_ ,
'BUILDERTYPE' : 'StrippingCharmAssociativeConf' ,
'STREAMS' : { 'Leptonic' : [ 'StrippingDiMuonAndGammaForCharmAssociative',
'StrippingDoubleDiMuonForCharmAssociative' ,
'StrippingChiAndDiMuonForCharmAssociative' ,
'StrippingDiChiForCharmAssociative' ,
'StrippingDiMuonAndWForCharmAssociative' ,
'StrippingChiAndWForCharmAssociative' ] }
}
# =============================================================================
## @class StrippingCharmAssociativeConf
# Helper class required by Tom & Greig
# @author Vanya BELYAEV [email protected]
# @date 2011-05-26
class StrippingCharmAssociativeConf(LineBuilder) :
"""
Helper class to configure 'CharmAssociative/PromptCharm'-lines
"""
__configuration_keys__ = tuple ( _default_configuration_.keys() )
## private set of selections
__selections_ = {}
## get the default configuration
@staticmethod
def defaultConfiguration( key = None ) :
"""
Get the default configurtaion
>>> conf = StrippingCharmAssociativeConf.defaultConfiguration()
Get the elements of default configurtaion:
>>> prescale = StrippingCharmAssociativeConf.defaultConfiguration( 'DoubleDiMuon' )
"""
from copy import deepcopy
_config = deepcopy ( _default_configuration_ )
if key : return _config[ key ]
return _config
## constructor
def __init__( self , name , config ) :
"""
Constructor
"""
# check the names
if not name : name = 'CharmAssociative'
# check the names
if 'CharmAssociative' != name :
logger.warning ( 'The non-default name is specified "%s"' % name )
from copy import deepcopy
_config = deepcopy ( _default_configuration_ )
if isinstance ( config , dict ):
_config.update ( config )
LineBuilder.__init__( self , name , _config )
else :
LineBuilder.__init__( self , name , config )
## private set of selections
self.__selections_ = {}
if not self.__selections_.has_key ( self.name() ) :
self.__selections_[ self.name() ] = {}
self.__selections_[ self.name() ]['CONFIG'] = deepcopy ( _config )
keys = _config.keys()
for key in keys :
if not key in _default_configuration_ :
raise KeyError("Invalid key is specified: '%s'" % key )
val = _config[key]
if val != _default_configuration_ [ key ] :
logger.debug ('new configuration: %-16s : %s ' % ( key , _config[key] ) )
keys = _config.keys()
for key in keys :
if not key in _default_configuration_ :
raise KeyError("Invalid key is specified: '%s'" % key )
val = _config[key]
if val != _default_configuration_ [ key ] :
logger.debug ('new configuration: %-16s : %s ' % ( key , _config[key] ) )
## cehck for prescales
for keys in self.keys() :
if 0 > key.find('Prescale') and 0 > key.find('prescale') : continue
if 1 != self[key] : logger.warning ( '%s is %s' % ( key , self[key] ) )
for line in self._lines_associative_onia () :
self.registerLine(line)
logger.debug ( "Register line: %s" % line.name () )
# =========================================================================
## pure technical method for creation of selections
# =========================================================================
def make_selection ( self ,
tag ,
algotype ,
inputs ,
*args ,
**kwargs ) :
"""
Pure Technical method for creation of simple 1-step selections
"""
sel_tag = '%s_Selection' % tag
sel_name = 'Sel%sFor%s' % ( tag , self.name() )
#
## check existing selection
#
sel = self._selection ( sel_tag )
if sel : return sel
#
## adjust a bit the arguments
if not kwargs.has_key ( 'Preambulo' ) :
kwargs ['Preambulo' ] = self['Preambulo']
if not kwargs.has_key ( 'ParticleCombiners' ) :
kwargs ['ParticleCombiners'] = { '' : 'LoKi::VertexFitter:PUBLIC' }
#
## use "simple-selection"
#
from PhysSelPython.Wrappers import SimpleSelection
sel = SimpleSelection (
sel_name ,
algotype ,
inputs ,
*args ,
**kwargs )
#
return self._add_selection( sel_tag , sel )
## get the common preambulo:
def preambulo ( self ) : return self [ 'Preambulo' ]
## get photon cuts
def photonCuts ( self ) : return self [ 'PhotonCuts' ]
## get muon cuts
def muonCuts ( self ) : return self [ 'MuonCuts' ]
## get the dimuons
def DiMuon ( self ) :
"""
Get the dimuons
"""
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
## ATTENITON: we need PROMPT onia, thus 'AllMuons' are here
from StandardParticles import StdAllLooseMuons as inpts
return self.make_selection (
'DiMuon' ,
CombineParticles ,
[ inpts ] ,
DecayDescriptor = 'J/psi(1S) -> mu+ mu-' ,
DaughtersCuts = { 'mu+' : self.muonCuts() } ,
CombinationCut = " psi | psi_prime | ( 8 * GeV < AM ) " ,
MotherCut = " chi2vx < 20 "
)
## get the dimuons & gamma
def DiMuonAndGamma ( self ) :
"""
Get dimuon & gamma
Select events with dimuon and high-pt photon
at least one dimuon
"""
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
from StandardParticles import StdLooseAllPhotons as gamma
return self.make_selection (
'DiMuonAdGamma' ,
CombineParticles ,
[ self.DiMuon() , gamma ] ,
DecayDescriptor = " chi_b2(1P) -> J/psi(1S) gamma" ,
DaughtersCuts = { 'J/psi(1S)' : " dimuon_tight " ,
'gamma' : self.photonCuts () } ,
CombinationCut = " AALL " ,
MotherCut = " ALL "
)
## get the double dimuons
def DoubleDiMuon ( self ) :
"""
Get 2xdimuon
Select events with two dimuons
"""
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
return self.make_selection (
'DoubleDiMuon' ,
CombineParticles ,
[ self.DiMuon() ] ,
DecayDescriptor = "chi_b2(1P) -> J/psi(1S) J/psi(1S)" ,
CombinationCut = " AALL " ,
MotherCut = " ALL "
)
## get chi_(c,b) & dimuon
def ChiAndDiMuon ( self ) :
""" Construct Chi_(c,b) + dumuon line """
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
from StandardParticles import StdLooseAllPhotons as gamma
pre_chidimu = self.make_selection (
'PreChiAndDiMuon' ,
CombineParticles ,
[ self.DoubleDiMuon() , self.DiMuon() , gamma ] , ## the first one is fake
DecayDescriptor = "chi_b2(1P) -> J/psi(1S) J/psi(1S) gamma " ,
DaughtersCuts = { 'gamma' : self['GammaChi'] } ,
## mimic chi_(c,b):
CombinationCut = """
( AM13 - AM1 < 1.05 * GeV ) |
( AM23 - AM2 < 1.05 * GeV )
""" ,
MotherCut = " ALL "
)
## apply pi0-veto-tagger !
from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger
return self.make_selection (
'ChiAndDiMuon' ,
Pi0Veto__Tagger ,
[ pre_chidimu ] ,
MassWindow = 25 * MeV ,
MassChi2 = -1 ,
ExtraInfoIndex = 25011 ## unique !
)
## get 2xXhi_(c,b) & dimuon
def DiChi ( self ) :
""" Construct 2xChi_(c,b) line """
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
from StandardParticles import StdLooseAllPhotons as gamma
pre_dichi = self.make_selection (
'PreDiChi' ,
CombineParticles ,
[ self.DoubleDiMuon() , self.DiMuon() , gamma ] , ## the first one is fake
DecayDescriptor = "chi_b2(1P) -> J/psi(1S) J/psi(1S) gamma gamma" ,
DaughtersCuts = { 'gamma' : self [ 'GammaChi' ] } ,
## mimic 2xchi_(c,b):
CombinationCut = """
( ( AM13 - AM1 < 1.05 * GeV ) & ( AM24 - AM2 < 1.05 * GeV ) ) |
( ( AM14 - AM1 < 1.05 * GeV ) & ( AM23 - AM2 < 1.05 * GeV ) )
""" ,
MotherCut = " ALL "
)
## apply pi0-veto-tagger !
from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger
return self.make_selection (
'DiChi' ,
Pi0Veto__Tagger ,
[ pre_dichi ] ,
MassWindow = 25 * MeV ,
MassChi2 = -1 ,
ExtraInfoIndex = 25012 ## unique !
)
## W+- selection
def W ( self ) :
""" Get simple W+-selection """
from GaudiConfUtils.ConfigurableGenerators import FilterDesktop
from StandardParticles import StdAllLooseMuons as inpts
return self.make_selection (
'W' ,
FilterDesktop ,
[ inpts ] ,
Code = self['WCuts']
)
## select dimuon + W
def DiMuonAndW ( self ) :
""" Select dimuon + W+ """
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
return self.make_selection (
'DiMuW' ,
CombineParticles ,
[ self.W () , self.DiMuon() ] ,
DecayDescriptor = "[chi_b2(2P) -> J/psi(1S) mu+]cc" ,
CombinationCut = " AALL " ,
MotherCut = " ALL "
)
## select chi + W
def ChiAndW ( self ) :
""" Select chi & W+ """
from GaudiConfUtils.ConfigurableGenerators import CombineParticles
from StandardParticles import StdLooseAllPhotons as gamma
pre_chiw = self.make_selection (
'PreChiAndW' ,
CombineParticles ,
[ self.DiMuon() , self.W() , gamma ] ,
DecayDescriptor = "[chi_b2(2P) -> J/psi(1S) gamma mu+]cc" ,
DaughtersCuts = {
"J/psi(1S)" : " ( M < 3.21 * GeV ) | in_range ( 8.5 * GeV , M , 12.0 * GeV ) " ,
'gamma' : self['GammaChi']
} ,
## mimic chi_(c,b):
CombinationCut = """
( AM12 - AM1 < 1.05 * GeV ) |
( AM12 - AM1 < 1.05 * GeV )
""" ,
MotherCut = " ALL "
)
## apply pi0-veto-tagger !
from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger
return self.make_selection (
'ChiAndW' ,
Pi0Veto__Tagger ,
[ pre_chiw ] ,
MassWindow = 25 * MeV ,
MassChi2 = -1 ,
ExtraInfoIndex = 25013 ## unique !
)
## get all dicharm lines
def _lines_associative_onia ( self ) :
sel = self._selection ( 'OniaAndXLines' )
if sel : return sel
sel = [
##
StrippingLine (
"DiMuonAndGammaFor" + self._name ,
prescale = self [ 'DiMuonAndGammaPrescale' ] , ## ATTENTION! Prescale here !!
checkPV = self [ 'CheckPV'] ,
algos = [ self . DiMuonAndGamma () ] ,
MDSTFlag = False , ## try to save it on MDST.DST
) ,
##
StrippingLine (
"DoubleDiMuonFor" + self._name ,
prescale = self [ 'DoubleDiMuonPrescale' ] , ## ATTENTION! Prescale here !!
checkPV = self [ 'CheckPV' ] ,
algos = [ self . DoubleDiMuon () ]
) ,
##
StrippingLine (
"ChiAndDiMuonFor" + self._name ,
prescale = self [ 'ChiAndDiMuonPrescale' ] , ## ATTENTION! Prescale here !!
checkPV = self [ 'CheckPV' ] ,
algos = [ self . ChiAndDiMuon () ]
) ,
##
StrippingLine (
"DiChiFor" + self._name ,
prescale = self [ 'DiChiPrescale' ] , ## ATTENTION! Prescale here !!
checkPV = self [ 'CheckPV'] ,
algos = [ self . DiChi () ]
) ,
##
StrippingLine (
"DiMuonAndWFor" + self._name ,
prescale = self [ 'DiMuonAndWPrescale' ] , ## ATTENTION! Prescale here !!
checkPV = self [ 'CheckPV' ] ,
algos = [ self . DiMuonAndW () ]
) ,
##
StrippingLine (
"ChiAndWFor" + self._name ,
prescale = self [ 'ChiAndWPrescale' ] , ## ATTENTION! Prescale here !!
checkPV = self [ 'CheckPV' ] ,
algos = [ self . ChiAndW () ]
) ,
##
]
#
return self._add_selection ( 'OniaAndXLines' , sel )
## get the selection, associated with some nickname name
def _selection ( self , nick ) :
"""
Get the selection, associated with some nickname name
"""
if not self.__selections_.has_key ( self.name() ) :
self.__selections_[ self.name() ] = {}
return self.__selections_[ self.name() ].get( nick , None )
## add the selection, associated with some nickname name
def _add_selection ( self , nick , sel ) :
"""
Add the selection, associated with some nickname name
"""
if not self.__selections_.has_key ( self.name() ) :
self.__selections_[ self.name() ] = {}
if self.__selections_[ self.name()].has_key( nick ) :
raise AttributeError , "Selection '%s'already exists " % nick
self.__selections_[ self.name() ][ nick ] = sel
return sel
# =============================================================================
if '__main__' == __name__ :
logger.info ( 80*'*' )
logger.info ( __doc__ )
logger.info ( ' Author : %s' % __author__ )
logger.info ( ' Date : %s' % __date__ )
logger.info ( 80 * '*' )
##
clines = set()
logger.info ( 70 * '-' )
logger.info ( ' %-15s | %-40s ' % ( 'STREAM' , 'LINE' ) )
logger.info ( 70 * '-' )
for stream in default_config['STREAMS'] :
lines = default_config['STREAMS'][stream]
for l in lines :
logger.info ( ' %-15s | %-40s ' % ( stream , l ) )
clines.add ( l )
logger.info ( 80 * '*' )
##
logger.info ( ' The output locations for the default configuration: ' )
##
_conf = StrippingCharmAssociativeConf ( 'CharmAssociative' ,
config = default_config['CONFIG'] )
##
##
_ln = ' ' + 61*'-' + '+' + 60*'-'
logger.info ( _ln )
logger.info ( ' %-60s| %-45s | %s ' % ( 'Output location' ,
'Stripping line name' ,
'MDST.DST' ) )
logger.info ( _ln )
for l in _conf.lines() :
lout = l.outputLocation()
lname = l.name()
flag = l.MDSTFlag
logger.info ( ' %-60s| %-45s | %s ' % ( lout, lname , flag ) )
if not lname in clines :
raise AttributeError ('Unknown Line %s' % lname )
clines.remove ( lname )
logger.info ( _ln )
logger.info ( 80*'*' )
if clines :
raise AttributeError('Undeclared lines: %s' % clines )
keys = default_config['CONFIG'].keys()
keys.sort()
prescale = [ i for i in keys if 0 <= i.find('Prescale') ]
other = [ i for i in keys if not i in prescale ]
logger.info ( 'Configuration keys are: %s' % other )
logger.info ( 'Prescale keys are: %s' % prescale )
logger.info ( 80*'*' )
## make dot-graphs
try:
selections = _conf._selections_private()
for s in selections :
from SelPy.graph import graph
o = graph ( s , format = 'png' )
if o : logger.info ( "Generate DOT-graph: %s" % o )
else : logger.error ( "Can't produce DOT=-graph for %s" % s.name() )
except : pass
# =============================================================================
# The END
# =============================================================================
| [
"[email protected]"
] | |
e4f524c34ba0082859ddb9941baece1aca2e4699 | 72c4cea551df0bee51c5740926b2fdb1feaf3470 | /tools/extjs_cc/js_util_types.py | 73c302d3e7b293be2584a95ad8a69eb0447b1b2d | [
"MIT"
] | permissive | joeedh/fairmotion | 2955fda64b36a634c42c3382b20f18dae189d13f | fc0bca395057cd3e78f91bcb1796b6c5eda73d2b | refs/heads/master | 2023-05-26T20:03:51.583970 | 2023-05-14T16:07:53 | 2023-05-14T16:07:53 | 34,099,859 | 1 | 0 | MIT | 2023-03-05T21:45:57 | 2015-04-17T06:13:09 | JavaScript | UTF-8 | Python | false | false | 1,200 | py | class SortedDict (dict):
def __iter__(self):
return self.keys()
def keys(self):
keys = []
for k in dict.keys(self):
keys.append(k)
keys.sort()
return keys
class odict:
def __init__(self):
self.items = []
self.dict = {}
self.keypos = {}
def __setitem__(self, key, value):
if key not in self.dict:
self.items.append(key)
self.keypos[key] = len(self.items)-1
self.dict[key] = value
def __getitem__(self, key):
return self.dict[key]
def __delitem__(self, key):
i = self.keypos[key]
self.items.pop(i)
del self.keypos[key]
del self.dict[key]
def __contains__(self, key):
return key in self.dict
def __len__(self):
return len(self.dict)
def __iter__(self):
return iter(self.items)
def keys(self):
return list(self.items)
def values(self):
vs = []
for k in self.items:
vs.append(self.dict[k])
return vs
def __str__(self):
s = "odict{"
for i, k in enumerate(self.items):
if i > 0: s += ", "
s += "%s: %s" % (str(k), str(self[k]))
s += "}"
return s
def __repr__(self):
return str(self)
| [
"[email protected]"
] | |
a77c332b2b115927dcde0f7f51d933d05a77ed39 | 387587c753e76d98a6a0401327766c45561d5109 | /ros_catkin_ws/build_isolated/roswtf/catkin_generated/pkg.develspace.context.pc.py | 1092e102641507a591a765f81fb850af55d4d319 | [
"MIT"
] | permissive | letrend/neopixel_fpga | 7a4819a566fab02bd602c3338b8aaa0ddf4bee85 | d9247417a9d311eceebad5898571846c6e33a44a | refs/heads/master | 2021-01-23T01:00:55.290431 | 2017-05-30T20:15:38 | 2017-05-30T20:15:38 | 92,855,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "roswtf"
PROJECT_SPACE_DIR = "/root/ros_catkin_ws/devel_isolated/roswtf"
PROJECT_VERSION = "1.12.7"
| [
"[email protected]"
] | |
96e30180d97e49f108b03f284c1cd9180a406a6f | 4ca8df3a127e9b15cbfecea6505928741f685a63 | /case_crawler/apps/anjuke/anjuke_detail.py | 2056d2c0dd518496541867711808fcc9a781663f | [] | no_license | gongfei6644/gongfei | 2beb082c56197bc23ca20a6927ff6c10d8beaa83 | bfdd5e6a3a8d76ad1e43cf54df186b944cad29e4 | refs/heads/master | 2022-11-30T20:49:22.213040 | 2020-08-16T12:52:28 | 2020-08-16T12:52:28 | 286,283,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,761 | py | from lxml import etree
import datetime
import time
import requests
import json
import re
import bs4
class KeParse:
def parse_xpath(self, html_str):
# print('======================html_str===========================')
dic = {}
if html_str['str'] == '404':
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
dic['d_status'] = 0
dic['detail_time'] = now_time
return dic
if html_str['str'] and html_str['str'] != '404':
html = etree.HTML(html_str['str'])
# 楼盘名
dic["project_name"] = html.xpath('string(//div[contains(text(), "所属小区")]/following-sibling::*[1]/a/text())')
# 朝向
try:
dic['orientation'] = html.xpath("string(//node()[contains(text(),'房屋朝向:')]/following-sibling::div[1]/text())").strip()
except Exception as e:
dic['orientation'] = ''
# 装修
try:
dic['decoration'] = html.xpath("string(//node()[contains(text(),'装修程度:')]/following-sibling::div[1]/text())").strip()
except Exception as e:
# print(e)
dic['decoration'] = ''
# 解析有无电梯 //node()[text()='有无电梯']/following-sibling::*[1]/text()
try:
dic['is_elevator'] = html.xpath("//node()[text()='配套电梯:']/following-sibling::div[1]/text()")[0].strip()
except Exception as e:
dic['is_elevator'] = ''
# 解析产权性质
try:
dic['property_nature'] = html.xpath("//node()[text()='产权性质:']/following-sibling::div[1]/text()")[0].strip()
except Exception as e:
dic['property_nature'] = ''
# 住宅类别
try:
dic['usage'] = html.xpath("//node()[text()='房屋类型:']/following-sibling::div[1]/text()")[0].strip()
except Exception as e:
dic['usage'] = ''
# 建筑结构
try:
dic['building_structure'] = html.xpath("//node()[text()='建筑结构:']/following-sibling::div[1]/text()")[0].strip()
except Exception as e:
dic['building_structure'] = ''
# 建筑类别
try:
dic['building_type'] = html.xpath("//node()[text()='建筑类别:']/following-sibling::div[1]/text()")[0].strip()
except Exception as e:
dic['building_type'] = ''
# 挂牌时间 案例时间
# case_happen_date
try:
dic['case_happen_date'] = html.xpath("//node()[contains(text(),'发布时间')]/text()")[0].strip().split(':')[1]
array = time.strptime(dic['case_happen_date'], "%Y年%m月%d日")
# print(dic['case_happen_date'])
dic['case_happen_date'] = time.strftime("%Y-%m-%d", array)
except Exception as e:
print('case_happen_date', e)
dic['case_happen_date'] = ''
# 建筑年代
try:
dic['build_date'] = html.xpath("//node()[text()='建造年代:']/following-sibling::div[1]/text()")[0].strip()
except Exception as e:
dic['build_date'] = ''
# 小区配套
try:
dic['supporting_facilities'] = html.xpath("string(//node()[text()='小区配套']/following-sibling::div[1]/text())").strip()
except Exception as e:
dic['supporting_facilities'] = ''
# 联系电话
try:
dic['tel'] = html.xpath("string(//span[@id='mobilecode'])")
except Exception as e:
dic['tel'] = ''
# dic['test'] = 'mogu'
if dic['case_happen_date'] or dic['orientation'] or dic['build_date']:
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
dic['d_status'] = 1
# dic['test'] = 1
dic['detail_time'] = now_time
if dic['is_elevator'] == '有':
if dic['supporting_facilities']:
dic['supporting_facilities'] = dic['supporting_facilities'] + ',电梯'
else:
dic['supporting_facilities'] = dic['supporting_facilities'] + '电梯'
print(11111, dic)
return dic
else:
now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
dic['d_status'] = 'err'
dic['detail_time'] = now_time
return dic | [
"1"
] | 1 |
536a06cfcaa0279e625af71aeacac292641c89e2 | 3a02bff6397eb23afd55cc17faf81c24a8751f2d | /sample/searchthu.py | ffe31e067214a28f9ee142f297252b6978367848 | [] | no_license | cothuyanninh/Python_Code | 909fd4d798cbd856e8993f9d4fea55b4b7c97a1f | 7f657db61845cf8c06725a2da067df526e696b93 | refs/heads/master | 2022-11-06T01:00:39.939194 | 2019-01-13T15:27:38 | 2019-01-13T15:27:38 | 164,468,626 | 0 | 1 | null | 2022-10-13T16:16:21 | 2019-01-07T17:40:51 | Python | UTF-8 | Python | false | false | 133 | py | import re
var_search = re.compile(r'\d+\s\w+')
result = var_search.findall('12 bananas , 11 apple , 10 nhan , 9 buoi')
print(result) | [
"[email protected]"
] | |
486ac0bfd66bce156a39da9463114fbf9533402e | d49d2daeb2c69ac40b2d5b688c5e935e7ea5a0c4 | /statsmodels/regression/tests/test_theil.py | 29d88de30dfca79be297d2d1d2bdd570faf31506 | [
"BSD-3-Clause"
] | permissive | twoertwein/statsmodels | 9243e39ec0ef0f0eab503dc612ece29ccc3c021b | a16b49cd997791878b2834bd73c8a38baa1f20ad | refs/heads/master | 2020-05-02T08:29:57.592000 | 2019-03-27T03:16:20 | 2019-03-27T03:16:20 | 177,844,483 | 0 | 0 | NOASSERTION | 2019-03-26T18:14:02 | 2019-03-26T18:14:02 | null | UTF-8 | Python | false | false | 13,584 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 05 17:29:56 2014
Author: Josef Perktold
"""
import os
import numpy as np
import pandas as pd
from scipy import stats
from numpy.testing import assert_allclose, assert_equal, assert_warns
from statsmodels.regression.linear_model import OLS, WLS, GLS
from statsmodels.tools.tools import add_constant
from statsmodels.tools.sm_exceptions import InvalidTestWarning
from statsmodels.sandbox.regression.penalized import TheilGLS
class TestTheilTextile(object):
@classmethod
def setup_class(cls):
cur_dir = os.path.dirname(os.path.abspath(__file__))
filepath = os.path.join(cur_dir, "results",
"theil_textile_predict.csv")
cls.res_predict = pd.read_csv(filepath, sep=",")
names = "year lconsump lincome lprice".split()
data = np.array('''\
1923 1.99651 1.98543 2.00432
1924 1.99564 1.99167 2.00043
1925 2 2 2
1926 2.04766 2.02078 1.95713
1927 2.08707 2.02078 1.93702
1928 2.07041 2.03941 1.95279
1929 2.08314 2.04454 1.95713
1930 2.13354 2.05038 1.91803
1931 2.18808 2.03862 1.84572
1932 2.18639 2.02243 1.81558
1933 2.20003 2.00732 1.78746
1934 2.14799 1.97955 1.79588
1935 2.13418 1.98408 1.80346
1936 2.22531 1.98945 1.72099
1937 2.18837 2.0103 1.77597
1938 2.17319 2.00689 1.77452
1939 2.2188 2.0162 1.78746'''.split(), float).reshape(-1, 4)
endog = data[:, 1]
# constant at the end to match Stata
exog = np.column_stack((data[:, 2:], np.ones(endog.shape[0])))
#prior(lprice -0.7 0.15 lincome 1 0.15) cov(lprice lincome -0.01)
r_matrix = np.array([[1, 0, 0], [0, 1, 0]])
r_mean = [1, -0.7]
cov_r = np.array([[0.15**2, -0.01], [-0.01, 0.15**2]])
mod = TheilGLS(endog, exog, r_matrix, q_matrix=r_mean, sigma_prior=cov_r)
cls.res1 = mod.fit(cov_type='data-prior', use_t=True)
#cls.res1._cache['scale'] = 0.0001852252884817586 # from tg_mixed
cls.res1._cache['scale'] = 0.00018334123641580062 # from OLS
from .results import results_theil_textile as resmodule
cls.res2 = resmodule.results_theil_textile
def test_basic(self):
pt = self.res2.params_table[:,:6].T
params2, bse2, tvalues2, pvalues2, ci_low, ci_upp = pt
assert_allclose(self.res1.params, params2, rtol=2e-6)
#TODO tgmixed seems to use scale from initial OLS, not from final res
# np.sqrt(res.scale / res_ols.scale)
# see below mse_resid which is equal to scale
corr_fact = 0.9836026210570028
corr_fact = 0.97376865041463734
corr_fact = 1
assert_allclose(self.res1.bse / corr_fact, bse2, rtol=2e-6)
assert_allclose(self.res1.tvalues * corr_fact, tvalues2, rtol=2e-6)
# pvalues are very small
#assert_allclose(self.res1.pvalues, pvalues2, atol=2e-6)
#assert_allclose(self.res1.pvalues, pvalues2, rtol=0.7)
ci = self.res1.conf_int()
# not scale corrected
assert_allclose(ci[:,0], ci_low, rtol=0.01)
assert_allclose(ci[:,1], ci_upp, rtol=0.01)
assert_allclose(self.res1.rsquared, self.res2.r2, rtol=2e-6)
# Note: tgmixed is using k_exog for df_resid
corr_fact = self.res1.df_resid / self.res2.df_r
assert_allclose(np.sqrt(self.res1.mse_resid * corr_fact),
self.res2.rmse, rtol=2e-6)
assert_allclose(self.res1.fittedvalues,
self.res_predict['fittedvalues'], atol=5e7)
def test_other(self):
tc = self.res1.test_compatibility()
assert_allclose(np.squeeze(tc[0]), self.res2.compat, rtol=2e-6)
assert_allclose(np.squeeze(tc[1]), self.res2.pvalue, rtol=2e-6)
frac = self.res1.share_data()
# TODO check again, I guess tgmixed uses final scale in hatmatrix
# but I'm not sure, it passed in previous version, but now we override
# scale with OLS scale
# assert_allclose(frac, self.res2.frac_sample, rtol=2e-6)
# regression tests:
assert_allclose(frac, 0.6946116246864239, rtol=2e-6)
def test_no_penalization(self):
res_ols = OLS(self.res1.model.endog, self.res1.model.exog).fit()
res_theil = self.res1.model.fit(pen_weight=0, cov_type='data-prior')
assert_allclose(res_theil.params, res_ols.params, rtol=1e-10)
assert_allclose(res_theil.bse, res_ols.bse, rtol=1e-10)
def test_smoke(self):
self.res1.summary()
class CheckEquivalenceMixin(object):
tol = {'default': (1e-4, 1e-20)}
@classmethod
def get_sample(cls):
np.random.seed(987456)
nobs, k_vars = 200, 5
beta = 0.5 * np.array([0.1, 1, 1, 0, 0])
x = np.random.randn(nobs, k_vars)
x[:, 0] = 1
y = np.dot(x, beta) + 2 * np.random.randn(nobs)
return y, x
def test_attributes(self):
attributes_fit = ['params', 'rsquared', 'df_resid', 'df_model',
'llf', 'aic', 'bic'
#'fittedvalues', 'resid'
]
attributes_inference = ['bse', 'tvalues', 'pvalues']
import copy
attributes = copy.copy(attributes_fit)
if not getattr(self, 'skip_inference', False):
attributes.extend(attributes_inference)
for att in attributes:
r1 = getattr(self.res1, att)
r2 = getattr(self.res2, att)
if not np.size(r1) == 1:
r1 = r1[:len(r2)]
# check if we have overwritten tolerance
rtol, atol = self.tol.get(att, self.tol['default'])
message = 'attribute: ' + att #+ '\n%r\n\%r' % (r1, r2)
assert_allclose(r1, r2, rtol=rtol, atol=atol, err_msg=message)
# models are not close enough for some attributes at high precision
assert_allclose(self.res1.fittedvalues, self.res1.fittedvalues,
rtol=1e-3, atol=1e-4)
assert_allclose(self.res1.resid, self.res1.resid,
rtol=1e-3, atol=1e-4)
class TestTheil1(CheckEquivalenceMixin):
# penalize last two parameters to zero
@classmethod
def setup_class(cls):
y, x = cls.get_sample()
mod1 = TheilGLS(y, x, sigma_prior=[0, 0, 1., 1.])
cls.res1 = mod1.fit(200000)
cls.res2 = OLS(y, x[:, :3]).fit()
class TestTheil2(CheckEquivalenceMixin):
# no penalization = same as OLS
@classmethod
def setup_class(cls):
y, x = cls.get_sample()
mod1 = TheilGLS(y, x, sigma_prior=[0, 0, 1., 1.])
cls.res1 = mod1.fit(0)
cls.res2 = OLS(y, x).fit()
class TestTheil3(CheckEquivalenceMixin):
# perfect multicollinearity = same as OLS in terms of fit
# inference: bse, ... is different
@classmethod
def setup_class(cls):
cls.skip_inference = True
y, x = cls.get_sample()
xd = np.column_stack((x, x))
#sp = np.zeros(5), np.ones(5)
r_matrix = np.eye(5, 10, 5)
mod1 = TheilGLS(y, xd, r_matrix=r_matrix) #sigma_prior=[0, 0, 1., 1.])
cls.res1 = mod1.fit(0.001, cov_type='data-prior')
cls.res2 = OLS(y, x).fit()
class TestTheilGLS(CheckEquivalenceMixin):
# penalize last two parameters to zero
@classmethod
def setup_class(cls):
y, x = cls.get_sample()
nobs = len(y)
weights = (np.arange(nobs) < (nobs // 2)) + 0.5
mod1 = TheilGLS(y, x, sigma=weights, sigma_prior=[0, 0, 1., 1.])
cls.res1 = mod1.fit(200000)
cls.res2 = GLS(y, x[:, :3], sigma=weights).fit()
class TestTheilLinRestriction(CheckEquivalenceMixin):
# impose linear restriction with small uncertainty - close to OLS
@classmethod
def setup_class(cls):
y, x = cls.get_sample()
#merge var1 and var2
x2 = x[:, :2].copy()
x2[:, 1] += x[:, 2]
#mod1 = TheilGLS(y, x, r_matrix =[[0, 1, -1, 0, 0]])
mod1 = TheilGLS(y, x[:, :3], r_matrix =[[0, 1, -1]])
cls.res1 = mod1.fit(200000)
cls.res2 = OLS(y, x2).fit()
# adjust precision, careful: cls.tol is mutable
tol = {'pvalues': (1e-4, 2e-7),
'tvalues': (5e-4, 0)}
tol.update(cls.tol)
cls.tol = tol
class TestTheilLinRestrictionApprox(CheckEquivalenceMixin):
# impose linear restriction with some uncertainty
@classmethod
def setup_class(cls):
y, x = cls.get_sample()
#merge var1 and var2
x2 = x[:, :2].copy()
x2[:, 1] += x[:, 2]
#mod1 = TheilGLS(y, x, r_matrix =[[0, 1, -1, 0, 0]])
mod1 = TheilGLS(y, x[:, :3], r_matrix =[[0, 1, -1]])
cls.res1 = mod1.fit(100)
cls.res2 = OLS(y, x2).fit()
# adjust precision, careful: cls.tol is mutable
import copy
tol = copy.copy(cls.tol)
tol2 = {'default': (0.15, 0),
'params': (0.05, 0),
'pvalues': (0.02, 0.001),
}
tol.update(tol2)
cls.tol = tol
class TestTheilPanel(object):
@classmethod
def setup_class(cls):
#example 3
nobs = 300
nobs_i = 5
n_groups = nobs // nobs_i
k_vars = 3
from statsmodels.sandbox.panel.random_panel import PanelSample
dgp = PanelSample(nobs, k_vars, n_groups, seed=303305)
# add random intercept, using same RandomState
dgp.group_means = 2 + dgp.random_state.randn(n_groups)
print('seed', dgp.seed)
y = dgp.generate_panel()
x = np.column_stack((dgp.exog[:,1:],
dgp.groups[:,None] == np.arange(n_groups)))
cls.dgp = dgp
cls.endog = y
cls.exog = x
cls.res_ols = OLS(y, x).fit()
def test_regression(self):
y = self.endog
x = self.exog
n_groups, k_vars = self.dgp.n_groups, self.dgp.k_vars
Rg = (np.eye(n_groups-1) - 1. / n_groups *
np.ones((n_groups - 1, n_groups-1)))
R = np.c_[np.zeros((n_groups - 1, k_vars)), Rg]
r = np.zeros(n_groups - 1)
R[:, k_vars-1] = -1
lambd = 1 #1e-4
mod = TheilGLS(y, x, r_matrix=R, q_matrix=r, sigma_prior=lambd)
res = mod.fit()
# regression test
params1 = np.array([
0.9751655 , 1.05215277, 0.37135028, 2.0492626 , 2.82062503,
2.82139775, 1.92940468, 2.96942081, 2.86349583, 3.20695368,
4.04516422, 3.04918839, 4.54748808, 3.49026961, 3.15529618,
4.25552932, 2.65471759, 3.62328747, 3.07283053, 3.49485898,
3.42301424, 2.94677593, 2.81549427, 2.24895113, 2.29222784,
2.89194946, 3.17052308, 2.37754241, 3.54358533, 3.79838425,
1.91189071, 1.15976407, 4.05629691, 1.58556827, 4.49941666,
4.08608599, 3.1889269 , 2.86203652, 3.06785013, 1.9376162 ,
2.90657681, 3.71910592, 3.15607617, 3.58464547, 2.15466323,
4.87026717, 2.92909833, 2.64998337, 2.891171 , 4.04422964,
3.54616122, 4.12135273, 3.70232028, 3.8314497 , 2.2591451 ,
2.39321422, 3.13064532, 2.1569678 , 2.04667506, 3.92064689,
3.66243644, 3.11742725])
assert_allclose(res.params, params1)
pen_weight_aicc = mod.select_pen_weight(method='aicc')
pen_weight_gcv = mod.select_pen_weight(method='gcv')
pen_weight_cv = mod.select_pen_weight(method='cv')
pen_weight_bic = mod.select_pen_weight(method='bic')
assert_allclose(pen_weight_gcv, pen_weight_aicc, rtol=0.1)
# regression tests:
assert_allclose(pen_weight_aicc, 4.77333984, rtol=1e-4)
assert_allclose(pen_weight_gcv, 4.45546875, rtol=1e-4)
assert_allclose(pen_weight_bic, 9.35957031, rtol=1e-4)
assert_allclose(pen_weight_cv, 1.99277344, rtol=1e-4)
def test_combine_subset_regression(self):
# split sample into two, use first sample as prior for second
endog = self.endog
exog = self.exog
nobs = len(endog)
n05 = nobs // 2
np.random.seed(987125)
# shuffle to get random subsamples
shuffle_idx = np.random.permutation(np.arange(nobs))
ys = endog[shuffle_idx]
xs = exog[shuffle_idx]
k = 10
res_ols0 = OLS(ys[:n05], xs[:n05, :k]).fit()
res_ols1 = OLS(ys[n05:], xs[n05:, :k]).fit()
w = res_ols1.scale / res_ols0.scale #1.01
mod_1 = TheilGLS(ys[n05:], xs[n05:, :k], r_matrix=np.eye(k),
q_matrix=res_ols0.params,
sigma_prior=w * res_ols0.cov_params())
res_1p = mod_1.fit(cov_type='data-prior')
res_1s = mod_1.fit(cov_type='sandwich')
res_olsf = OLS(ys, xs[:, :k]).fit()
assert_allclose(res_1p.params, res_olsf.params, rtol=1e-9)
corr_fact = np.sqrt(res_1p.scale / res_olsf.scale)
# corrct for differences in scale computation
assert_allclose(res_1p.bse, res_olsf.bse * corr_fact, rtol=1e-3)
# regression test, does not verify numbers
# especially why are these smaller than OLS on full sample
# in larger sample, nobs=600, those were close to full OLS
bse1 = np.array([
0.26589869, 0.15224812, 0.38407399, 0.75679949, 0.66084200,
0.54174080, 0.53697607, 0.66006377, 0.38228551, 0.53920485])
assert_allclose(res_1s.bse, bse1, rtol=1e-7)
| [
"[email protected]"
] | |
53d395e3b5cb5d29f3c216dd8eb2aef90a4c9986 | 4be2c72579486ad04a00db0349028de96d2dce89 | /scripts/Helios/commands/Convert Geometry to Bounding Box-advanced.py | c1dbb31dc25f280bfa2822fa5d0cfe3262be0517 | [] | no_license | italic-r/maya-prefs | 6a617d40beee8937186b4699c5cead44e01c2d40 | aa21e5e2938dc2698ce5f555ee74a594e08aed2b | refs/heads/master | 2021-09-09T16:31:00.411349 | 2018-03-18T01:40:10 | 2018-03-18T01:40:10 | 86,961,959 | 16 | 8 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | mc.GeometryToBoundingBoxOptions() | [
"[email protected]"
] | |
90b98ddf5b956a0791e967c823e49214eac8d25c | f0f107abf24049ae8a798844fef89387020f6182 | /python/jittor/pyjt_compiler.py | 1bd643c4202a52b1d30485b550de890bda5ed15d | [
"Apache-2.0"
] | permissive | ABlueLight/jittor | 9dc90505fa79eec2f645202a0e7e46c69980db67 | 655f3cc0905f4cbff2a44a0f1151cb43168e7a20 | refs/heads/master | 2023-03-17T05:46:57.453546 | 2021-03-16T03:21:23 | 2021-03-16T03:21:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,635 | py | # ***************************************************************
# Copyright (c) 2021 Jittor. All Rights Reserved.
# Maintainers: Dun Liang <[email protected]>.
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import re
import os
from jittor_utils import LOG, run_cmd, simple_timer
import json
from collections import OrderedDict
def parse_attrs(s):
'''parse @attrs(..., x=y) syntax'''
attrs = {}
if s is None: return attrs
for a in s.split(','):
a = a.strip()
if len(a)==0: continue
if '=' in a:
k, v = a.split('=')
attrs[k] = v
else:
attrs[a] = 1
return attrs
pytype_map = {
"const char*": ["PyUnicode_AsUTF8", "PyUnicode_FromString", "PyUnicode_CheckExact"],
"int": ["PyLong_AsLong", "PyLong_FromLong", "PyLong_CheckExact"],
"int64": ["PyLong_AsLongLong", "PyLong_FromLongLong", "PyLong_CheckExact"],
"uint": ["PyLong_AsUnsignedLong", "PyLong_FromUnsignedLong", "PyLong_CheckExact"],
"uint64": ["PyLong_AsUnsignedLongLong", "PyLong_FromUnsignedLongLong", "PyLong_CheckExact"],
"void": ["...", "GET_PY_NONE", "..."],
"PyObject*": ["","",""],
}
def get_pytype_map(T, i):
assert T != ""
if T in pytype_map:
return pytype_map[T][i]
return ["from_py_object", "to_py_object", "is_type"][i]+"<"+T+">"
binary_number_slots = {
"__add__": "nb_add",
"__sub__": "nb_subtract",
"__mul__": "nb_multiply",
"__mod__": "nb_remainder",
"__divmod__": "nb_divmod",
"__pow__": "nb_power",
"__lshift__": "nb_lshift",
"__rshift__": "nb_rshift",
"__and__": "nb_and",
"__xor__": "nb_xor",
"__or__": "nb_or",
"__floordiv__": "nb_floor_divide",
"__truediv__": "nb_true_divide",
"__matmul__": "nb_matrix_multiply",
}
for k,v in list(binary_number_slots.items()):
# __add__: nb_add ----> __iadd: nb_inplace_add
binary_number_slots["__i"+k[2:]] = "nb_inplace"+v[2:]
unary_number_slots = {
"__neg__": "nb_negative",
"__abs__": "nb_absolute",
}
def split_args(s):
# split args xxx,xxx, xx<xx,xx>, xx
s = s.strip()
if s=="": return []
prev = -1
presum = 0
args = []
for i in range(len(s)):
if s[i]=='<':
presum += 1
elif s[i]=='>':
presum -= 1
if presum==0 and s[i]==',':
args.append(s[prev+1:i])
prev = i
args.append(s[prev+1:])
return args
def get_def_code(df, scope_name, pyname, self_as_arg0=False):
is_fast_call = not pyname.startswith("__")
no_need_convert = pyname == "__getitem__"
args = df["args"]
# n==1 && PyXXX__CheckExact(args[0]) && ...
max_args = len(args)
min_args = max_args
for tid, a in enumerate(args):
if a[2] != "":
min_args = tid
break
arg_names = [ f"args[{i}]" for i in range(len(args))]
if self_as_arg0:
max_args -= 1
min_args -= 1
arg_names = ["self"] + arg_names[:-1]
kw_args_id = []
for aid, arg in enumerate(args):
if "VarHolder*" != arg[0] and is_fast_call:
kw_args_id.append(aid)
func_quick_check_runable = ""
func_quick_check_size = f"n<={max_args} && n>={min_args}"
if len(kw_args_id):
func_quick_check_size = f"n+(kw?Py_SIZE(kw):0)<={max_args} && n+(kw?Py_SIZE(kw):0)>={min_args}"
fill_with_default = ""
func_args_convert = ""
func_call = df["func_name"]+"("
pytypes = [ get_pytype_map(a[0],0) for a in args ]
holder_dec_array = []
holder_set_array = []
for tid, tpc in enumerate(pytypes):
check = get_pytype_map(args[tid][0],2)
default_arg = args[tid][2]
jtp = args[tid][0]
holder_dec = ""
holder_set = ""
if jtp == "VarHolder*":
holder_dec = f"unique_ptr<VarHolder> arg{tid}_holder"
holder_set = f", arg{tid}_holder"
if jtp == "VarSlices":
holder_dec = f"vector<unique_ptr<VarHolder>> arg{tid}_holder"
holder_set = f", arg{tid}_holder"
holder_dec_array.append(holder_dec)
holder_set_array.append(holder_set)
if len(default_arg):
func_args_convert += f"""
{holder_dec};
{jtp} arg{tid};
if (n>{tid-self_as_arg0}) {{
CHECK(({check}({arg_names[tid]})));
arg{tid} = {tpc}({arg_names[tid]}{holder_set});
arg_filled |= 1ull << {tid};
}}
"""
fill_with_default += f"""
if (!(arg_filled & (1ull<<{tid}))) {{
arg{tid} = {default_arg};
}}
"""
else:
func_quick_check_runable += f" && {check}({arg_names[tid]})"
func_args_convert += f"""
{holder_dec};
{jtp} arg{tid} = {tpc}({arg_names[tid]}{holder_set});
"""
if tid: func_call += ","
if args[tid][3].endswith("&&"):
func_call += f"move(arg{tid})"
else:
func_call += f"arg{tid}"
if pyname == "__richcmp__":
for rname in [ "__lt__", "__le__", "__gt__",
"__ge__", "__eq__", "__ne__"]:
if rname in df["attrs"]:
func_quick_check_runable += " && op==Py_"+rname[2:-2].upper()
# fill args with keyword arguments
fill_with_kw = ""
if is_fast_call and len(kw_args_id):
fill_with_kw = f"""
if (kw) {{
auto kw_n = Py_SIZE(kw);
for (int i=0; i<kw_n; i++) {{
auto ko = PyTuple_GET_ITEM(kw, i);
auto vo = args[i+n];
auto ks = PyUnicode_AsUTF8(ko);
uint khash = hash(ks);
{"".join([
f'''
if (khash == {get_hash(args[aid][1])}u) {{
// hash match {args[aid][1]}
CHECK(({get_pytype_map(args[aid][0],2)}(vo)));
arg{aid} = {pytypes[aid]}(vo{holder_set_array[aid]});
arg_filled |= 1ull << {aid};
continue;
}}
'''
for aid in kw_args_id
])}
LOGf << "Not a valid keyword:" << ks;
}}
}}
"""
if len(args):
func_args_convert += """
CHECK(!PyErr_Occurred());
"""
func_call += ")"
if df["is_property"]:
if pyname.startswith("__get__"):
func_call = df["func_name"]
else:
assert pyname.startswith("__set__"), pyname
func_call = df["func_name"] + "= arg0"
has_return = df["return_t"]!="void" and df["return_t"]!=""
# add XXX::xxx or XXX->xxx if is class def
if df["is_scope_def"]:
if df["is_static"]:
func_call = f"{scope_name}::" + func_call
else:
func_call = f"(GET_RAW_PTR({scope_name},self))->" + func_call
if pyname == "__init__":
# XXX->xxx(...) ---> new XXX xxx(...)
assert "->" in func_call, func_call
func_call = "new " + func_call.replace("->", " ")
if no_need_convert:
func_quick_check_runable = ""
func_args_convert = ""
fill_with_kw = fill_with_default = ""
return (
func_quick_check_size + func_quick_check_runable,
func_args_convert,
fill_with_kw+fill_with_default,
func_call,
has_return
)
hash_to_key_map = {}
def get_hash(s):
mask = (1<<32)-1
v=0
mul = 1
for c in s:
v += mul * ord(c)
mul *= 55
v &= mask
mul &= mask
if v in hash_to_key_map:
assert hash_to_key_map[v] == s, \
f"hash conflict {hash_to_key_map[v]} {s} {hash_to_key_map}"
hash_to_key_map[v] = s
return v
reg = re.compile(
'(/\\*(.*?)\\*/\\s*)?(//\\s*@pyjt\\(([^\\n]*)\\)\\s*)'
# ^^^^^^^^^^^^^^^^^ ^^^^ ^^^^
# doc string $1 pyjt args $3
+
'(//\\s*@attrs\\(([^\\n]*)\\)\\s*)?'
# ^^^^^ ^^^^^^^
# attrs args $5
, re.DOTALL)
def generate_error_code_from_func_header(func_head, target_scope_name, name, dfs, basename, h, class_info):
# func_head is a string like:
# (PyObject* self, PyObject** args, int64 n, PyObject* kw) -> PyObject*
lib_name = os.path.basename(h).split("_")[0]
# TODO: fix/add var help
if target_scope_name == "Var": target_scope_name = None
if target_scope_name:
if target_scope_name == "flags":
help_name = "flags"
else:
help_name = ""+target_scope_name+'.'+name
else:
help_name = name
if lib_name in ["mpi", "nccl", "cudnn", "curand", "cublas", "mkl"]:
help_name = lib_name+'.'+help_name
help_cmd = f"help(jt.{help_name})"
LOG.vvv("gen err from func_head", func_head)
args = func_head[1:].split(")")[0].split(",")
error_code = f" << \"Wrong inputs arguments, Please refer to examples({help_cmd}).\""
error_code += r' << "\n\nTypes of your inputs are:\n"'
for arg in args:
arg = arg.strip()
if arg.startswith("PyObject* "):
t, n = arg.split(' ')
if n == "args" or n == "_args":
error_code += f" << PyTupleArgPrinter{{{n}, \"args\"}} "
elif n == "kw":
error_code += f" << PyKwArgPrinter{{{n}}} "
else:
error_code += f" << PyArgPrinter{{{n}, \"{n}\"}} "
elif arg.startswith("PyObject** "):
t, n = arg.split(' ')
error_code += f" << PyFastCallArgPrinter{{{n}, n, kw}} "
break
else:
LOG.vvv("Unhandled arg", arg)
LOG.vvv("gen err from func_head", func_head, " -> ", error_code)
return error_code
def compile_src(src, h, basename):
res = list(reg.finditer(src, re.S))
if len(res)==0: return
class_ranges = None
class_name = None
class_info = None
submodule_name = None
submodule_ranges = None
submodule_info = None
defs = []
LOG.vv(("find in", h))
for x in res:
LOG.vvv((x, x.groups()))
g = x.groups()
doc = g[1]
pyjt = g[3]
attrs = g[5]
esplit = lambda x: [] if x==None else \
[ a.strip() for a in x.split(",") if len(a.strip()) ]
attrs = parse_attrs(attrs)
pynames = esplit(pyjt)
end = x.end()
def find_bc(i):
while src[i] not in "({;":
i += 1
j = i+1
if src[i]==';':
return i, j
presum = 1
while True:
if src[j] in "({[":
presum += 1
elif src[j] in ")}]":
presum -= 1
if presum==0:
s = src[i]+src[j]
assert s in ("()","{}","()"), "braces not match "+s
return i, j
j += 1
# // @pyjt(DType)
# struct DType {
# ^ --> a
# .....
# } <--- b
# or
# // @pyjt(hash)
# inline uint hash(const char* input)
# ^ --> a ^ --> b
a, b = find_bc(end)
is_property = 0
if src[a] == ';':
# This case
# class XXX {
# // @pyjt(property)
# T property;
# }
is_property = 1
if src[a] == '{':
assert len(pynames)==1
if "submodule" in attrs:
assert submodule_ranges==None
submodule_ranges = (a, b)
submodule_name = src[end:a-1].strip().split()[-1]
submodule_info = {
"pynames": pynames,
"attrs": attrs
}
continue
assert class_ranges==None
class_ranges = (a, b)
class_name = src[end:a-1].strip().split()[-1]
class_info = {
"pynames": pynames,
"attrs": attrs
}
continue
is_scope_def = False
is_static = False
scope_name = ""
if class_ranges != None:
if class_ranges[0] < a and a < class_ranges[1]:
is_scope_def = True
scope_name = class_name
if submodule_ranges != None:
if submodule_ranges[0] < a and a < submodule_ranges[1]:
is_scope_def = True
scope_name = submodule_name
is_static = True
dec = src[end:b+1].strip()
arr = src[end:a].strip().split()
func_name = arr[-1]
is_constructor = False
if is_scope_def and func_name==class_name:
is_constructor = True
args = []
for arg in split_args(src[a+1:b]):
if arg=="": continue
default = ""
if "=" in arg:
arg, default = arg.split('=')
default = default
arg = arg.strip()
name = arg.split(' ')[-1]
tp = arg[:-len(name)]
tp = tp.strip()
prev_tp = tp
# const string& ----> string
if tp.startswith("const") and tp.endswith("&"):
tp = tp[5:-1].strip()
# T&& -> T
if tp.endswith("&&"):
tp = tp[:-2].strip()
# ArrayArgs& -> ArrayArgs
if tp.endswith("&"):
tp = tp[:-1].strip()
args.append((tp, name.strip(), default.strip(), prev_tp))
return_t = ""
for a in arr[:-1]:
if a in ["", "inline", "constexpr"]: continue
if a == "static":
is_static = True
continue
if return_t != "": return_t += " "
return_t += a
if is_scope_def and class_info and "submodule" in class_info["attrs"]:
is_static = True
for pid, pyname in enumerate(pynames):
for rname in [ "__lt__", "__le__", "__gt__",
"__ge__", "__eq__", "__ne__"]:
if pyname.endswith(rname):
attrs[rname] = 1
pynames[pid] = pyname.replace(rname, "__richcmp__")
def_info = {
"is_scope_def": is_scope_def,
"is_constructor": is_constructor,
"is_static": is_static,
"is_property": is_property,
"func_name": func_name,
"args": args, # [(type,name,defaut), ...]
"return_t": return_t, # return type
"dec": dec, # full string of xxx(A a, B b)
"pynames": pynames, # names in @pyjt(...)
"attrs": attrs, # attrs in @attrs(...)
"doc": doc,
"scope_name": scope_name,
}
if is_property:
# This case
# class XXX {
# // @pyjt(property)
# T property;
# }
assert is_scope_def and not is_static
def_info["is_property"] = 1
def_info["pynames"] = ["__get__"+n for n in pynames]
assert return_t != "void"
defs.append(dict(def_info))
def_info["pynames"] = ["__set__"+n for n in pynames]
assert len(args) == 0
def_info["args"] = [(def_info["return_t"], func_name, "", "")]
def_info["return_t"] = "void"
defs.append(dict(def_info))
continue
else:
defs.append(def_info)
LOG.vvv(lambda: json.dumps(def_info, indent=4))
# deal with defs
if len(defs) == 0: return
# include_name = h[4:] # remove "src/" prefix
include_name = h
code = []
class_defs_code = []
class_getsets_code = []
class_gets = OrderedDict()
class_sets = OrderedDict()
class_slots_code = []
submodule_defs_code = []
def_targets = OrderedDict()
for df in defs:
for name in df["pynames"]:
if df["is_scope_def"] and '.' not in name:
if df["scope_name"] == class_name:
name = class_info["pynames"][0] + '.' + name
else:
name = submodule_info["pynames"][0] + '.' + name
if name not in def_targets:
def_targets[name] = []
def_targets[name].append(df)
for name in def_targets:
dfs = def_targets[name]
target_scope_name = None
LOG.vv(name)
if "." in name:
target_scope_name, name = name.split(".")
# array for each df:
arr_func_quick_check_runable = []
arr_func_args_convert = []
arr_fill_with_default = []
arr_func_call = []
arr_has_return = []
self_as_arg0 = False
for df in dfs:
self_as_arg0 = class_info and \
target_scope_name == class_info["pynames"][0] and \
df["scope_name"] == submodule_name \
and not name.startswith("__")
res = get_def_code(df, df["scope_name"], name, bool(self_as_arg0))
arr_func_quick_check_runable.append(res[0])
arr_func_args_convert.append(res[1])
arr_fill_with_default.append(res[2])
arr_func_call.append(res[3])
arr_has_return.append(res[4])
slot_name = None
func_cast = ""
func_fill = ""
before_return = ""
if name == "__init__":
slot_name = "tp_init"
func_head = "(PyObject* self, PyObject* _args, PyObject* kw) -> int"
func_fill = """
int64 n = Py_SIZE(_args);
auto args = (PyObject**)&PyTuple_GET_ITEM(_args, 0);
(void)n, (void)args;
// TODO: support kw
CHECK(kw==0);
"""
elif name == "__repr__":
slot_name = "tp_repr"
func_head = "(PyObject* self) -> PyObject*"
func_fill = "int64 n = 0; (void)n;"
elif name.startswith("__get__"):
slot_name = "tp_gets"
name = name[len("__get__"):]
func_head = "(PyObject* self, void*) -> PyObject*"
func_fill = "int64 n = 0; (void)n;"
elif name.startswith("__set__"):
slot_name = "tp_sets"
name = name[len("__set__"):]
func_head = "(PyObject* self, PyObject* arg, void*) -> int"
func_fill = """
int64 n=1;
PyObject** args = &arg;
(void)n, (void)args;
"""
elif name == "__call__":
slot_name = "tp_call"
func_head = "(PyObject* self, PyObject* _args, PyObject* kw) -> PyObject*"
func_fill = """
int64 n = Py_SIZE(_args);
auto args = (PyObject**)&PyTuple_GET_ITEM(_args, 0);
(void)n, (void)args;
// TODO: support kw
CHECK(kw==0);
"""
elif name == "__dealloc__":
slot_name = "tp_dealloc"
func_head = "(PyObject* self) -> void"
func_fill = "int64 n = 0"
before_return = "Py_TYPE(self)->tp_free((PyObject *) self);"
elif name in binary_number_slots:
slot_name = "tp_as_number->"+binary_number_slots[name]
func_head = "(PyObject* self, PyObject* b) -> PyObject*"
if name.endswith("pow__"):
func_head = "(PyObject* self, PyObject* b, PyObject*) -> PyObject*"
func_fill = """
int64 n = 2;
PyObject* args[] = {self, b};
(void)n, (void)args;
"""
elif name in unary_number_slots:
slot_name = "tp_as_number->"+unary_number_slots[name]
func_head = "(PyObject* self) -> PyObject*"
func_fill = """
int64 n = 1;
PyObject* args[] = {self};
(void)n, (void)args;
"""
elif name == "__richcmp__":
slot_name = "tp_richcompare"
func_head = "(PyObject* self, PyObject* b, int op) -> PyObject*"
func_fill = """
int64 n = 2;
PyObject* args[] = {self, b};
(void)n, (void)args;
"""
elif name == "__len__":
slot_name = "tp_as_sequence->sq_length"
func_head = "(PyObject* self) -> Py_ssize_t"
func_fill = """
int64 n = 0;
(void)n;
"""
elif name == "__map_len__":
slot_name = "tp_as_mapping->mp_length"
func_head = "(PyObject* self) -> Py_ssize_t"
func_fill = """
int64 n = 0;
(void)n;
"""
elif name == "__getitem__":
slot_name = "tp_as_sequence->sq_item"
func_head = "(PyObject* self, Py_ssize_t arg0) -> PyObject*"
func_fill = f"""
int64 n = 1;
(void)n;
if (arg0 >= GET_RAW_PTR({dfs[0]["scope_name"]},self)->size()) {{
PyErr_SetString(PyExc_IndexError, "");
return 0;
}}
"""
elif name == "__map_getitem__":
slot_name = "tp_as_mapping->mp_subscript"
func_head = "(PyObject* self, PyObject* arg0) -> PyObject*"
func_fill = f"""
int64 n = 1;
PyObject* args[] = {{arg0}};
(void)n;
"""
elif name.startswith("__"):
LOG.f(f"Not support slot {name}")
continue
else:
func_head = "(PyObject* self, PyObject** args, int64 n, PyObject* kw) -> PyObject*"
func_cast = f"(PyCFunction)(PyObject* (*)(PyObject*,PyObject**,int64,PyObject*))"
# if not return, return py_none
arr_has_return = [ True for _ in arr_has_return ]
arr_func_return = []
doc_all = ""
decs = "The function declarations are:\n"
for did, has_return in enumerate(arr_has_return):
df = dfs[did]
func_call = arr_func_call[did]
if df["doc"]:
doc_all += "Document:\n"
doc_all += df["doc"]
doc_all += "\nDeclaration:\n"
doc_all += df["dec"]
decs += " " + df["dec"]+'\n'
if has_return:
assert "-> int" not in func_head
if "-> PyObject*" in func_head:
if "return_self" in df["attrs"]:
arr_func_return.append(
f"return (({func_call}), Py_INCREF(self), self)")
else:
arr_func_return.append(
f"return {get_pytype_map(df['return_t'],1)}(({func_call}))")
func_return_failed = "return nullptr"
else:
arr_func_return.append(
f"return ({func_call});")
func_return_failed = "return -1"
else:
if "-> int" in func_head:
arr_func_return.append(f"return ({func_call},0)")
func_return_failed = "return -1"
else:
assert "-> void" in func_head
arr_func_return.append(f"{func_call};{before_return}return")
func_return_failed = "return"
# generate error msg when not a valid call
error_log_code = generate_error_code_from_func_header(func_head, target_scope_name, name, dfs, basename ,h, class_info)
func = f"""
{func_cast}[]{func_head} {{
try {{
{func_fill};
uint64 arg_filled=0;
(void)arg_filled;
{"".join([f'''
if ({arr_func_quick_check_runable[did]}) {{
{arr_func_args_convert[did]};
{arr_fill_with_default[did]};
{arr_func_return[did]};
}}
'''
for did in range(len(arr_func_return))
])}
LOGf << "Not a valid call.";
}} catch (const std::exception& e) {{
if (!PyErr_Occurred()) {{
std::stringstream ss;
ss {error_log_code};
PyErr_Format(PyExc_RuntimeError,
"%s\\n%s\\nFailed reason:%s",
ss.str().c_str(),
R""({decs})"",
e.what()
);
}}
}}
{func_return_failed};
}}
"""
if slot_name:
if slot_name=="tp_gets":
class_gets[name] = {
"func": func,
"doc": doc_all
}
continue
if slot_name=="tp_sets":
class_sets[name] = {
"func": func,
"doc": ""
}
continue
class_slots_code.append(f"""
tp.{slot_name} = {func};
""")
continue
need_static = ""
if df["is_scope_def"] and df["is_static"] and \
df["scope_name"] == class_name and \
"submodule" not in class_info["attrs"]:
need_static = " | METH_STATIC"
func = (f"""
{{ R""({name})"",
{func},
METH_FASTCALL | METH_KEYWORDS{need_static},
R""({doc_all})""
}}""")
if df["is_scope_def"]:
if df["scope_name"] == class_name or \
(class_info and \
target_scope_name == class_info["pynames"][0]):
class_defs_code.append(func)
else:
submodule_defs_code.append(func)
else:
code.append(func)
prop_names = list(set(class_gets.keys()).union(class_sets.keys()))
prop_names = sorted(prop_names)
for prop_name in prop_names:
get_func = "NULL"
set_func = "NULL"
doc = ""
if prop_name in class_gets:
get_func = class_gets[prop_name]["func"]
if class_gets[prop_name]["doc"]:
doc += class_gets[prop_name]["doc"]
if prop_name in class_sets:
set_func = class_sets[prop_name]["func"]
if class_sets[prop_name]["doc"]:
doc += class_sets[prop_name]["doc"]
class_getsets_code.append(f"""
{{"{prop_name}", {get_func}, {set_func}, R""({doc})""}}
""")
code.append("{0,0,0,0}")
class_defs_code.append("{0,0,0,0}")
class_getsets_code.append("{0,0,0,0}")
submodule_defs_code.append("{0,0,0,0}")
core_name = "jittor_core"
if class_info and "attrs" in class_info and "core_name" in class_info["attrs"]:
core_name = class_info["attrs"]["core_name"]
if submodule_info and "attrs" in submodule_info and "core_name" in submodule_info["attrs"]:
core_name = submodule_info["attrs"]["core_name"]
has_map = class_name in ["VarHolder", "NanoVector"]
has_seq = class_name == "NanoVector"
# add extra include to avoid compile error
src_code = ""
if include_name.endswith("var_slices.h"):
src_code += '#include "var_holder.h"\n'
src_code += f"""
#include "pyjt/py_converter.h"
#include "pyjt/py_arg_printer.h"
#include "common.h"
#include "{include_name}"
namespace jittor {{
{
"" if class_name is None else
f"PyHeapTypeObject Pyjt{class_name};" if "heaptype" in class_info["attrs"] else
f"PyTypeObject Pyjt{class_name};"
}
void pyjt_def_{basename}(PyObject* m) {{
static PyMethodDef defs[] = {{
{",".join(code)}
}};
ASSERT(PyModule_AddFunctions(m, defs)==0);
{
f'''
static PyMethodDef class_defs[] = {{
{",".join(class_defs_code)}
}};
static PyGetSetDef class_getsets[] = {{
{",".join(class_getsets_code)}
}};
static PyNumberMethods number_methods = {{0}};
{f"auto& htp =Pyjt{class_name}; auto& tp = htp.ht_type;"
if "heaptype" in class_info["attrs"] else
f"auto& tp = Pyjt{class_name};"}
tp.tp_as_number = &number_methods;
{f"static PyMappingMethods class_map_defs = {{0}};" if has_map else ""}
{f"tp.tp_as_mapping = &class_map_defs;" if has_map else ""}
{f"static PySequenceMethods class_seq_defs = {{0}};" if has_seq else ""}
{f"tp.tp_as_sequence = &class_seq_defs;" if has_seq else ""}
tp.tp_name = "{core_name}.{class_info["pynames"][0]}";
tp.tp_basicsize = GET_OBJ_SIZE({class_name});
tp.tp_new = PyType_GenericNew;
tp.tp_flags = Py_TPFLAGS_DEFAULT;
{"tp.tp_flags |= Py_TPFLAGS_HEAPTYPE; htp.ht_name = htp.ht_qualname = to_py_object<string>(tp.tp_name);"
if "heaptype" in class_info["attrs"] else ""}
tp.tp_methods = &class_defs[0];
tp.tp_getset = &class_getsets[0];
{"".join(class_slots_code)};
ASSERT(0==PyType_Ready(&tp)) << (PyErr_Print(), 0);
Py_INCREF(&tp);
ASSERT(0==PyModule_AddObject(m, "{class_info["pynames"][0]}", (PyObject*)&tp));
''' if class_name is not None else ""
}
{f'''
// sub module def
static PyMethodDef submodule_defs[] = {{
{",".join(submodule_defs_code)}
}};
auto sub = PyImport_AddModule("{core_name}.{submodule_info["pynames"][0]}");
ASSERT(PyModule_AddFunctions(sub, submodule_defs)==0);
ASSERT(sub);
ASSERT(0==PyModule_AddObject(m, "{submodule_info["pynames"][0]}", sub));
''' if submodule_name is not None else ""
}
}}
}}
"""
return src_code
def compile_single(head_file_name, src_file_name, src=None):
basename = head_file_name.split("/")[-1].split(".")[0]
if src==None:
with open(head_file_name, 'r') as f:
src = f.read()
code = compile_src(src, head_file_name, basename)
if not code: return False
LOG.vvv("write to", src_file_name)
LOG.vvvv(code)
with open(src_file_name, 'w') as f:
f.write(code)
return True
def compile(cache_path, jittor_path):
headers1 = run_cmd('find -L src/ | grep ".h$"', jittor_path).splitlines()
headers2 = run_cmd('find gen/ | grep ".h$"', cache_path).splitlines()
headers = [ os.path.join(jittor_path, h) for h in headers1 ] + \
[ os.path.join(cache_path, h) for h in headers2 ]
basenames = []
pyjt_names = []
for h in headers:
with open(h, 'r') as f:
src = f.read()
# jit_op_maker.h merge compile with var_holder.h
if h.endswith("src/var_holder.h"): continue
if h.endswith("jit_op_maker.h"):
with open(os.path.join(jittor_path, "src", "var_holder.h"), "r") as f:
src = f.read() + src
basename = h.split("/")[-1].split(".")[0]
fname = "pyjt_"+basename+".cc"
fname = os.path.join(cache_path, "gen", fname)
check = compile_single(h, fname, src)
if not check: continue
basenames.append(basename)
pyjt_names.append(fname)
code = f"""
#include "pyjt/numpy.h"
#include "pyjt/py_converter.h"
#include "common.h"
namespace jittor {{
{ " ".join([f"extern void pyjt_def_{n}(PyObject* m);" for n in basenames])}
void pyjt_def_all(PyObject* m) {{
numpy_init();
{ " ".join([f"pyjt_def_{n}(m);" for n in basenames])}
}}
}}
"""
fname = os.path.join(cache_path, "gen", "pyjt_all.cc")
LOG.vvv(("write to", fname))
LOG.vvvv(code)
with open(fname, "w") as f:
f.write(code)
pyjt_names.append(fname)
return pyjt_names
| [
"[email protected]"
] | |
b6dc92ec77e8df5536fba11972c47407da157815 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_wolfs.py | 194d3b5e802251036f20fdcd3d81d9b1e51c8aff | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
from xai.brain.wordbase.nouns._wolf import _WOLF
#calss header
class _WOLFS(_WOLF, ):
def __init__(self,):
_WOLF.__init__(self)
self.name = "WOLFS"
self.specie = 'nouns'
self.basic = "wolf"
self.jsondata = {}
| [
"[email protected]"
] | |
966d01d06a1e477526327db6dcde20f9bde29880 | 8fcc27160f8700be46296568260fa0017a0b3004 | /client/trinutils/driverutils.py | 60e804fa0f9f79cb0f8da31d7f522c627b05356c | [] | no_license | connoryang/dec-eve-serenity | 5d867f4eedfa896a4ef60f92556356cafd632c96 | b670aec7c8b4514fc47cd52e186d7ccf3aabb69e | refs/heads/master | 2021-01-22T06:33:16.303760 | 2016-03-16T15:15:32 | 2016-03-16T15:15:32 | 56,389,750 | 1 | 0 | null | 2016-04-16T15:05:24 | 2016-04-16T15:05:24 | null | UTF-8 | Python | false | false | 1,056 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\trinutils\driverutils.py
import trinity
class CannotIdentifyDriverException(Exception):
def __init__(self, vendor, description = 'NA'):
msg = str("Unable to retrieve info from %s card. Please ensure that you're using the right drivers or graphics card. /nDriver Description: %s" % (vendor, description))
super(Exception, self).__init__(self, msg)
def GetDriverVersion():
adapter = trinity.adapters.GetAdapterInfo(trinity.adapters.DEFAULT_ADAPTER)
if 'nvidia' not in adapter.description.lower():
raise CannotIdentifyDriverException('Unknown', adapter.description)
try:
info = adapter.GetDriverInfo()
except trinity.ALError:
raise CannotIdentifyDriverException('NVidia', adapter.description)
def getDriverVersionNumber(driverInfo):
verInfo = driverInfo.driverVersionString.replace('.', '')
return int(verInfo[-5:])
return getDriverVersionNumber(info)
| [
"[email protected]"
] | |
68b975e902a1a409aca00a40b7899bf7cd971a86 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02414/s302351716.py | bb38a4daf864560c044955692f3b79691df53c03 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | n, m, l = map(int, input().split())
a = [[int(num) for num in input().split()] for i in range(n)]
b = [[int(num) for num in input().split()] for i in range(m)]
c = [[0 for i in range(l)] for j in range(n)]
for i in range(l):
for j in range(n):
for k in range(m):
c[j][i] += a[j][k] * b[k][i]
for i in range(n):
for j in range(l):
if j == l - 1:
print(c[i][j])
else:
print("{0} ".format(c[i][j]), end = "") | [
"[email protected]"
] | |
0f480898fc7e05f7a44d92e63088b3271791e455 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_pulped.py | a81b741dbf00569f2d280aaa839f8c0f8a92acd4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from xai.brain.wordbase.nouns._pulp import _PULP
#calss header
class _PULPED(_PULP, ):
def __init__(self,):
_PULP.__init__(self)
self.name = "PULPED"
self.specie = 'nouns'
self.basic = "pulp"
self.jsondata = {}
| [
"[email protected]"
] | |
8c1cd566586a63f35f411d1809372f857c3002bf | 9b07e3fc9436f876a426bf5b02d07733de10d775 | /tests/port_tests/contour_tests/test_is_clockwise.py | 40740d612c4f63110a63c061f91b4ad3f9c81745 | [
"MIT"
] | permissive | lycantropos/martinez | 019e859ec513cc7ad38901e22dff8e897615776c | 86db48324cb50ecb52be8ab2e4278a6d5cdd562b | refs/heads/master | 2021-07-10T04:19:23.372706 | 2020-11-28T00:58:47 | 2020-11-28T00:58:47 | 224,819,004 | 7 | 1 | MIT | 2020-12-20T15:47:17 | 2019-11-29T09:16:26 | Python | UTF-8 | Python | false | false | 927 | py | from hypothesis import given
from tests.port_tests.hints import PortedContour
from tests.utils import implication
from . import strategies
@given(strategies.contours)
def test_basic(contour: PortedContour) -> None:
result = contour.is_clockwise
assert isinstance(result, bool)
@given(strategies.contours)
def test_empty(contour: PortedContour) -> None:
assert implication(not contour.points, not contour.is_clockwise)
@given(strategies.contours)
def test_reversed(contour: PortedContour) -> None:
reversed_contour = PortedContour(contour.points[::-1], contour.holes,
contour.is_external)
assert implication(bool(contour.points),
contour.is_clockwise is not reversed_contour)
@given(strategies.contours)
def test_alternatives(contour: PortedContour) -> None:
assert implication(contour.is_clockwise, not contour.is_counterclockwise)
| [
"[email protected]"
] | |
1db4444ba0313b6d08df9774f979296fed694d05 | f3abfb8d187d24a138aa7dca1b209f2881c5dfe9 | /src/normalize_punctuation.py | 66a0b59c9bee6caffca0103460658e2221b1dd93 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | StevenLOL/mtnt | 48fdfedf6d6eb7f08e770317a0cd4a6db40b6a41 | 1781119df75378c4e2db12315adfbbfa66031f4d | refs/heads/master | 2020-03-28T03:23:58.176606 | 2018-08-31T16:02:01 | 2018-08-31T16:02:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,942 | py | # -*- coding: utf-8 -*-
import re
def normalize_punctuation(s):
"""Adapted from https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/normalize-punctuation.perl"""
s = re.sub(r"\r", r"", s)
# remove extra spaces
s = re.sub(r"\(", r" \(", s)
s = re.sub(r"\)", r"\) ", s)
s = re.sub(r" +", r" ", s)
s = re.sub(r"\) ([\.\!\:\?\;\,])", r"\)$1", s)
s = re.sub(r"\( ", r"\(", s)
s = re.sub(r" \)", r"\)", s)
s = re.sub(r"(\d) \%", r"$1\%", s)
s = re.sub(r" :", r":", s)
s = re.sub(r" ;", r";", s)
s = re.sub(r"„", r'"', s)
s = re.sub(r"“", r'"', s)
s = re.sub(r"”", r'"', s)
s = re.sub(r"–", r"-", s)
s = re.sub(r"—", r" - ", s)
s = re.sub(r" +", r" ", s)
s = re.sub(r"´", r"'", s)
s = re.sub(r"([a-z])‘([a-z])", r"\1'\2", s)
s = re.sub(r"([a-z])’([a-z])", r"\1'\2", s)
s = re.sub(r"‘", r'"', s)
s = re.sub(r"‚", r'"', s)
s = re.sub(r"’", r'"', s)
s = re.sub(r"''", r'"', s)
s = re.sub(r"´´", r'"', s)
s = re.sub(r"…", r"...", s)
# French quotes
s = re.sub(r" « ", r' "', s)
s = re.sub(r"« ", r'"', s)
s = re.sub(r"«", r'"', s)
s = re.sub(r" » ", r'" ', s)
s = re.sub(r" »", r'"', s)
s = re.sub(r"»", r'"', s)
# handle pseudo-spaces
s = re.sub(r" \%", r"\%", s)
s = re.sub(r"nº ", r"nº ", s)
s = re.sub(r" :", r":", s)
s = re.sub(r" ºC", r" ºC", s)
s = re.sub(r" cm", r" cm", s)
s = re.sub(r" \?", r"\?", s)
s = re.sub(r" \!", r"\!", s)
s = re.sub(r" ;", r";", s)
s = re.sub(r", ", r", ", s)
s = re.sub(r" +", r" ", s)
# English "quotation," followed by comma, style
re.sub(r'"([,\.]+)', r'\1"', s)
re.sub(r"(\d) (\d)", r"$1.$2", s)
return s
if __name__ == '__main__':
print(normalize_punctuation("“what’s up?”, he said"))
| [
"[email protected]"
] | |
62470a4c3f7327cb8d1e809999078427266551ba | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v7/services/services/billing_setup_service/transports/grpc.py | e9ab412e07c9398dde9890e83efe494d5e3b6225 | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 12,273 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import billing_setup
from google.ads.googleads.v7.services.types import billing_setup_service
from .base import BillingSetupServiceTransport, DEFAULT_CLIENT_INFO
class BillingSetupServiceGrpcTransport(BillingSetupServiceTransport):
"""gRPC backend transport for BillingSetupService.
A service for designating the business entity responsible for
accrued costs.
A billing setup is associated with a payments account. Billing-
related activity for all billing setups associated with a
particular payments account will appear on a single invoice
generated monthly.
Mutates:
The REMOVE operation cancels a pending billing setup. The CREATE
operation creates a new billing setup.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or applicatin default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
address (Optionsl[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_billing_setup(
self,
) -> Callable[
[billing_setup_service.GetBillingSetupRequest],
billing_setup.BillingSetup,
]:
r"""Return a callable for the
get billing setup
method over gRPC.
Returns a billing setup.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetBillingSetupRequest],
~.BillingSetup]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_billing_setup" not in self._stubs:
self._stubs["get_billing_setup"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v7.services.BillingSetupService/GetBillingSetup",
request_serializer=billing_setup_service.GetBillingSetupRequest.serialize,
response_deserializer=billing_setup.BillingSetup.deserialize,
)
return self._stubs["get_billing_setup"]
@property
def mutate_billing_setup(
self,
) -> Callable[
[billing_setup_service.MutateBillingSetupRequest],
billing_setup_service.MutateBillingSetupResponse,
]:
r"""Return a callable for the
mutate billing setup
method over gRPC.
Creates a billing setup, or cancels an existing billing setup.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `BillingSetupError <>`__
`DateError <>`__ `FieldError <>`__ `HeaderError <>`__
`InternalError <>`__ `MutateError <>`__ `QuotaError <>`__
`RequestError <>`__
Returns:
Callable[[~.MutateBillingSetupRequest],
~.MutateBillingSetupResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_billing_setup" not in self._stubs:
self._stubs["mutate_billing_setup"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v7.services.BillingSetupService/MutateBillingSetup",
request_serializer=billing_setup_service.MutateBillingSetupRequest.serialize,
response_deserializer=billing_setup_service.MutateBillingSetupResponse.deserialize,
)
return self._stubs["mutate_billing_setup"]
__all__ = ("BillingSetupServiceGrpcTransport",)
| [
"[email protected]"
] | |
87001b3f155608be19566116d9d103185444880e | 817712488d5b36c85596e42a0b7178b4a79f95f0 | /knowledge_point/git/常用Git命令.py | d213af979aee72bd03c680e4e7575c1008a07fd6 | [] | no_license | TigerZhao007/GitHub | cfa1dcc5589988a18b0f7661e9f72b8088f95ec2 | 5e08f6a0e4033778f107cc89b422ab107082e4ab | refs/heads/master | 2020-10-02T01:25:39.251198 | 2019-12-12T12:06:25 | 2019-12-12T12:06:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,007 | py |
# ######################################################################################################################
# GIT相关基础命令
# ######################################################################################################################
# Git pull 强制拉取并覆盖本地代码~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 两个电脑同时对git上的项目进行跟新时,不免要用到将git上的代码拉取到本地更新本地代码的操作,
# 鉴于自己对git使用的还不是很熟练,所以就直接采取暴力的方法,直接拉取并覆盖本地的所有代码,命令如下:
'''
git fetch --all
git reset --hard origin/master
git pull
'''
# Git clone 远程克隆代码~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 原文链接:https://blog.csdn.net/zyj8691/article/details/79424950
# 添加ssh秘钥
'''
ssh -keygen -t rsa -C "[email protected]"
clip<~/.ssh/id_rsa.pub
'''
# 复制秘钥注:目录为C:\Users\用户名<br>
# 添加秘钥到GitHub<br>
# 左边选择SSHandGPGkeys,然后点击NewSSHkey按钮,<br>
# title设置标题,可以随便填,粘贴在你电脑上生成的key。<br>
# 关联一个远程库
'''
git remote add origin git@server-name:path/repo-name.git;
git remote add origin [email protected]:shaoxiaozuo/GitHub.git
'''
# 注意:git@server-name:path/repo-name.git替换为自己的,在这里复制<br>
'''
`git push -u origin master` # 第一次推送master分支的所有内容;
`git push origin master` # 此后,使用命令推送最新修改;
'''
# ######################################################################################################################
# GIT常用命令用法说明
# ######################################################################################################################
'''
git config; git init; git clone; git add; git commit; git diff; git reset; git status; git rm; git log;
git show; git tag; git branch; git checkout; git merge; git remote; git push; git pull; git stash;
'''
# git config~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git config –global user.name “[name]”
# 用法:git config –global user.email “[email address]”
# 该命令将分别设置提交代码的用户名和电子邮件地址。
# git init~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git init [repository name]
# 该命令可用于创建一个新的代码库。
# git clone~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git clone [url]
# 该命令可用于通过指定的URL获取一个代码库。
# git add~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git add [file]
# 该命令可以将一个文件添加至stage(暂存区)。
# 用法:git add *
# 该命令可以将多个文件添加至stage(暂存区)。
# git commit~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git commit -m “[ Type in the commit message]”
# 该命令可以在版本历史记录中永久记录文件。
#
# 用法:git commit -a
# 该命令将提交git add命令添加的所有文件,并提交git add命令之后更改的所有文件。
# git diff~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git diff
# 该命令可以显示尚未添加到stage的文件的变更。
# 用法:git diff –staged
# 该命令可以显示添加到stage的文件与当前最新版本之间的差异。
# 用法:git diff [first branch] [second branch]
# 该命令可以显示两个分支之间的差异。
# git reset~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git reset [file]
# 该命令将从stage中撤出指定的文件,但可以保留文件的内容。
# 用法:git reset [commit]
# 该命令可以撤销指定提交之后的所有提交,并在本地保留变更。
# 用法:git reset –hard [commit]
# 该命令将丢弃所有的历史记录,并回滚到指定的提交。
# git status~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git status
# 该命令将显示所有需要提交的文件。
# git rm~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git rm [file]
# 该命令将删除工作目录中的文件,并将删除动作添加到stage。
# git log~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git log
# 该命令可用于显示当前分支的版本历史记录。
# 用法:git log –follow[file]
# 该命令可用于显示某个文件的版本历史记录,包括文件的重命名。
# git show~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git show [commit]
# 该命令经显示指定提交的元数据以及内容变更。
# git tag~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git tag [commitID]
# 该命令可以给指定的提交添加标签。
# git branch~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git branch
# 该命令将显示当前代码库中所有的本地分支。
# 用法:git branch [branch name]~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 该命令将创建一个分支。
# 用法:git branch -d [branch name]
# 该命令将删除指定的分支。
# git checkout~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git checkout [branch name]
# 你可以通过该命令切换分支。
# 用法:git checkout -b [branch name]
# 你可以通过该命令创建一个分支,并切换到新分支上。
# git merge~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git merge [branch name]
# 该命令可以将指定分支的历史记录合并到当前分支。
# git remote~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git remote add [variable name] [Remote Server Link]
# 你可以通过该命令将本地的代码库连接到远程服务器。
# git push~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git push [variable name] master
# 该命令可以将主分支上提交的变更发送到远程代码库。
# 用法:git push [variable name] [branch]
# 该命令可以将指定分支上的提交发送到远程代码库。
# 用法:git push –all [variable name]
# 该命令可以将所有分支发送到远程代码库。
# 用法:git push [variable name] :[branch name]
# 该命令可以删除远程代码库上的一个分支。
# git pull~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git pull [Repository Link]
# 该命令将获取远程服务器上的变更,并合并到你的工作目录。
# git stash~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 用法:git stash save
# 该命令将临时保存所有修改的文件。
# 用法:git stash pop
# 该命令将恢复最近一次stash(储藏)的文件。
# 用法:git stash list
# 该命令将显示stash的所有变更。
# 用法:git stash drop
# 该命令将丢弃最近一次stash的变更。
# ######################################################################################################################
# GIT相关基础命令
# ######################################################################################################################
# 安装GIT~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 下载地址:https://git-scm.com/downloads。Windows平台下默认安装即可。
# 可参考百度经验:https://jingyan.baidu.com/article/9f7e7ec0b17cac6f2815548d.html
# Git 配置文件~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1、基本信息设置
# git config --global user.name #'自己的用户名' #设置用户名
# git config --global user.email #'自己的邮箱'#设置用户名邮箱
# 运行完可以在C:\Users\用户名下.gitconfig查看内容
# 2、初始化一个新的Git仓库
# /mkdir test # 创建文件夹
# cd test # 进入test文件夹
# git init
# 3、创建文件
# touch1.cpp
# git status # 查看状态
# 4、添加到暂存区
# git add 1.cpp
# 5、将文件从暂存区提交到仓库
# git commit -m 'add1.cpp'
# 6、*修改仓库文件,修改1.cpp内容
# cat 1.cpp *可以查看内容
# 7、*添加到暂存区 <br>
# git add1.cpp
# 8、*将文件从暂存区提交到仓库
# git commit -m 'change1.cpp'
# 9、*删除仓库文件
# rm -rf 1.cpp
# 10、*从Git中删除文件
# git rm1.cpp
# 11、*提交操作
# git commit -m #'删除1.cpp'
# 注:带*不是必须操作,供以后操作参考
| [
"[email protected]"
] | |
a83517229a9bc515937ba44bfbcf3ba0b52bde9d | e6f050fd05fb4ca5c296c18c2ee8359017b5cb53 | /final_Python기초/py07선택문/py07_ex13_MaxNumber3.py | 218bcee9b4bf5e81b2c33bdb87d5e4e71aa1906c | [] | no_license | eopr12/pythonfinal | 3f799be3091d9cb34885e922be3de59d6d9245d2 | 823faf1745b6c7d0f6f09941ba1dd8b0482e0d91 | refs/heads/master | 2022-07-15T10:13:09.097154 | 2020-05-16T08:34:39 | 2020-05-16T08:34:39 | 263,354,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,162 | py | # 숫자 입력 받기
x = input( "정수 입력")
y = input( "정수 입력")
z = input( "정수 입력")
# 문자열 정수 변환
x = int( x )
y = int( y )
z = int( z )
# x, y, z 를 비교
if x > y:
# 여기서 비교해야 값들을 무엇인가?
# x와 z를 비교해야 한다.
if x > z:
print("입력받은 수 중 가장 큰수는 ", x, "입니다")
else:
print("입력받은 수 중 가장 큰수는 ", z, "입니다")
else:
# 여기서 비교해야 값들을 무엇인가?
# y와 z를 비교해야 한다.
if y > z:
print("입력받은 수 중 가장 큰수는 ", y, "입니다")
else:
print("입력받은 수 중 가장 큰수는 ", z, "입니다")
# if ~ elif ~ else 로 바꾸어 보기
if x > y and x > z:
print("입력받은 수 중 가장 큰수는 ", x, "입니다")
elif y > z:
print("입력받은 수 중 가장 큰수는 ", y, "입니다")
else:
print("입력받은 수 중 가장 큰수는 ", z, "입니다")
# max 함수를 사용하여 최대값 구하기
maxvalue = max( x, y, z )
print("입력받은 수 중 가장 큰수는 ", maxvalue, "입니다")
| [
"[email protected]"
] | |
d2a8ba2e9ec7dce4043827b32255cd8316892c90 | 2695e14b44da27bc853bcbeb5b1ace3733eb7816 | /2017/day04.py | ddba50ddc3792b4f9fc1f955a4e925cb688b6d2a | [] | no_license | st3fan/aoc | ee19cbb0e2de31554cdec12ec4a8dfc92d0c5ecb | 5dfbb271998402579c373f424574995a1683a4c4 | refs/heads/master | 2022-12-25T19:30:53.957717 | 2022-12-25T03:31:53 | 2022-12-25T03:31:53 | 226,227,540 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | #!/usr/bin/env python3
from aoc import sort_str
from collections import Counter
def read_input():
with open("day04.input") as f:
return f.readlines()
if __name__ == "__main__":
# Part 1
def check_password(password):
c = Counter(password.split())
return c.most_common()[0][1] == 1
print("Part one:", sum(check_password(password) for password in read_input()))
# Part 2
def check_password(password):
c = Counter([sort_str(w) for w in password.split()])
return c.most_common()[0][1] == 1
print("Part two:", sum(check_password(password) for password in read_input()))
| [
"[email protected]"
] | |
ca6a16b00f5b4c1edc81adff0edca68141d06dcb | d04ba4cde1f45d781bdbccd3b07adc4a5c5e55e3 | /data_structures/01_binary_tree/02_binary_search_tree/04_binary_search_tree.py | 03fc73b2a99cde2914b758481b51d1a0ddabdb35 | [] | no_license | lanzhiwang/common_algorithm | 70766604d0faac80af95e829495bbc1cf2d774b6 | 232ad0f2b326ddbf021991a551cd38d39ceccd8f | refs/heads/master | 2020-11-23T21:20:56.067226 | 2020-09-24T06:07:37 | 2020-09-24T06:07:37 | 240,246,083 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,437 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from queue import Queue
'''
构建搜索二叉树
中序遍历 搜索二叉树
判断某个值是否存在
获得最大值和最小值
删除节点
'''
class Node(object):
def __init__(self, value, parent):
self.value = value
self.left = None
self.right = None
#Added in order to delete a node easier
self.parent = parent
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
def get_left(self):
return self.left
def set_left(self, left):
self.left = left
def get_right(self):
return self.right
def set_right(self, right):
self.right = right
def get_parent(self):
return self.parent
def set_parent(self, parent):
self.parent = parent
def __str__(self):
if (self.get_left() is not None) and (self.get_right() is not None):
return 'value: %s, left: %s, right: %s' % (
self.get_value(), self.get_left().get_value(), self.get_right().get_value())
elif (self.get_left() is None) and (self.get_right() is not None):
return 'label: %s, left: %s, right: %s' % (
self.get_value(), None, self.get_right().get_value())
elif (self.get_left() is not None) and (self.get_right() is None):
return 'label: %s, left: %s, right: %s' % (
self.get_value(), self.get_left().get_value(), None)
else:
return 'label: %s, left: %s, right: %s' % (
self.get_value(), None, None)
class BinarySearchTree:
def __init__(self):
self.root = None
def insert(self, value):
new_node = Node(value, None)
if self.empty():
self.root = new_node
else:
curr_node = self.root
while curr_node is not None:
parent_node = curr_node
if new_node.get_value() < curr_node.get_value():
curr_node = curr_node.get_left()
else:
curr_node = curr_node.get_right()
if new_node.get_value() < parent_node.get_value():
parent_node.set_left(new_node)
else:
parent_node.set_right(new_node)
new_node.set_parent(parent_node)
def empty(self):
if self.root is None:
return True
return False
def get_root(self):
return self.root
def get_node(self, value):
curr_node = self.get_root()
while curr_node is not None:
if value == curr_node.get_value():
return curr_node
elif value < curr_node.get_value():
curr_node = curr_node.get_left()
elif value > curr_node.get_value():
curr_node = curr_node.get_right()
else:
return None
def get_max(self, node=None):
if node is None:
node = self.get_root()
while node is not None:
parent_node = node
node = node.get_right()
else:
return parent_node
def get_min(self, node=None):
if node is None:
node = self.get_root()
while node is not None:
parent_node = node
node = node.get_left()
else:
return parent_node
def delete(self, value):
node = self.get_node(value)
if node is None:
return None
if node == self.get_root():
if node.get_left() is None and node.get_right() is None:
node.set_value(None)
elif node.get_left() is None and node.get_right() is not None:
self.root = node.get_right()
node.get_right().set_parent(None)
elif node.get_left() is not None and node.get_right() is None:
self.root = node.get_left()
node.get_left().set_parent(None)
elif node.get_left() is not None and node.get_right() is not None:
left_max = self.get_max(node.get_left())
self.delete(left_max.get_value())
node.set_value(left_max.get_value())
else:
if node.get_left() is None and node.get_right() is None:
if self.__is_right_children(node):
node.get_parent().set_right(None)
else:
node.get_parent().set_left(None)
elif node.get_left() is None and node.get_right() is not None:
if self.__is_right_children(node):
node.get_parent().set_right(node.get_right())
else:
node.get_parent().set_left(node.get_right())
node.get_right().set_parent(node.get_parent())
elif node.get_left() is not None and node.get_right() is None:
if self.__is_right_children(node):
node.get_parent().set_right(node.get_left())
else:
node.get_parent().set_left(node.get_left())
node.get_left().set_parent(node.get_parent())
elif node.get_left() is not None and node.get_right() is not None:
left_max = self.get_max(node.get_left())
self.delete(left_max.get_value())
node.set_value(left_max.get_value())
def __is_right_children(self, node):
if node == self.get_root():
return None
else:
if node.get_parent().get_right() == node:
return True
else:
return False
# 先序(前序)遍历
def __InOrderTraversal(self, node=None):
if node is None:
node = self.get_root()
node_list = []
node_list.append(node)
if node.get_left() is not None:
node_list.extend(self.__InOrderTraversal(node.get_left()))
if node.get_right() is not None:
node_list.extend(self.__InOrderTraversal(node.get_right()))
return node_list
# 中序遍历
def middle_traversal(self, node=None):
if node is None:
node = self.get_root()
node_list = []
if node.get_left() is not None:
node_list.extend(self.middle_traversal(node.get_left()))
node_list.append(node.get_value())
if node.get_right() is not None:
node_list.extend(self.middle_traversal(node.get_right()))
return node_list
# 广度遍历
def breadth_traversal(self, node=None):
if node is None:
node = self.get_root()
node_list = []
queue = Queue()
queue.put(node)
while not queue.empty():
val = queue.get()
node_list.append(val.get_value())
if val.get_left() is not None:
queue.put(val.get_left())
if val.get_right() is not None:
queue.put(val.get_right())
return node_list
def __str__(self):
node_list = self.__InOrderTraversal(self.root)
str = ""
for x in node_list:
str = str + " " + x.get_value().__str__()
return str
def testBinarySearchTree():
r'''
Example
8
/ \
3 10
/ \ \
1 6 14
/ \ /
4 7 13
'''
t = BinarySearchTree()
for i in [8, 3, 6, 1, 10, 14, 13, 4, 7]:
t.insert(i)
#Prints all the elements of the list in order traversal
print(t) # 8 3 1 6 4 7 10 14 13
print(t.breadth_traversal()) # [8, 3, 10, 1, 6, 14, 4, 7, 13]
print(t.middle_traversal()) # [1, 3, 4, 6, 7, 8, 10, 13, 14]
if t.get_node(6) is not None:
print("The label 6 exists")
else:
print("The label 6 doesn't exist")
if t.get_node(-1) is not None:
print("The label -1 exists")
else:
print("The label -1 doesn't exist")
if not t.empty():
print(("Max Value: ", t.get_max().get_value()))
print(("Min Value: ", t.get_min().get_value()))
for i in [13, 10, 8, 3, 6, 14]:
t.delete(i)
r'''
Example
7
/
1
\
4
'''
print(t.breadth_traversal()) # [7, 1, 4]
print(t.middle_traversal()) # [1, 4, 7]
if __name__ == "__main__":
testBinarySearchTree()
| [
"[email protected]"
] | |
9125d09021f8a3acd3df626360458b9b9c2ecf91 | 999ed80db247794159be1d752bc6f0fc272bd117 | /ansible/roles/test/files/ptftests/py3/vxlan_traffic.py | 7c1fd86eaa6d86c68abe422e27c1e0aea982cf59 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | ramakristipati/sonic-mgmt | 7fee876412f0121da96d751f7d199690c73496f3 | a86f0e5b1742d01b8d8a28a537f79bf608955695 | refs/heads/master | 2023-08-31T07:55:38.446663 | 2023-08-31T06:34:53 | 2023-08-31T06:34:53 | 315,448,103 | 2 | 0 | NOASSERTION | 2020-11-23T21:44:07 | 2020-11-23T21:44:07 | null | UTF-8 | Python | false | false | 35,558 | py | # VxLAN Traffic Script, to be run in PTF container. Usage:
# ptf --test-dir ptftests vxlan_traffic.VXLAN --platform-dir ptftests
# --qlen=1000 --platform remote -t
# 't2_ports=[16, 17, 0, 1, 4, 5, 21, 20];dut_mac=u"64:3a:ea:c1:73:f8";\
# expect_encap_success=True;packet_count=10;downed_endpoints=["100.0.1.10"]\
# vxlan_port=4789;topo_file="/tmp/vxlan_topo_file.json";config_file=\
# "/tmp/vxlan-config-TC1-v6_in_v4.json";t0_ports=[u"Ethernet42"];\
# random_src_ip=False;random_dport=True;random_dport=False' --relax
# --debug info --log-file /tmp/vxlan-tests.TC1.v6_in_v4.log
'''
The test checks vxlan encapsulation:
'test_encap' : Sends regular packets to T0-facing interface and expects to
see the encapsulated packets on the T2-facing interfaces.
The test has the following parameters:
config_file : is a filename of a file which contains all
necessary information to run the test. The file is
populated by ansible. This parameter is mandatory.
t2_ports : The list of PTF port indices facing T2 Neighbors,
AKA ports to expect the encapsulated packets to
come in.
dut_mac : The MAC address of the dut, given by "show
platform summary".
expect_encap_success : Is the encapsulation expected to succeed ?
True/False.
packet_count : Number of packets per endpoint to try. Default 10
downned_endpoints : The list of IP addresses which are down, due to BFD
being disabled.
vxlan_port : The global VxLAN port setup in the DUT.
Default: 4789
topo_file : The file that contains the topology information,
like minigraph data, connections, and so on.
t0_ports : The DUT intf into which we will inject payload
packets.
random_src_ip : Should we use random src IP addresses for the
payload packets? Default:False
random_dport : Should we use random dest port for the payload
packets? Default:True
random_sport : Should we use random src port for the payload
packets? Default:False
'''
import os.path
import json
from datetime import datetime
import logging
import random
from ipaddress import ip_address, IPv4Address, IPv6Address
import ptf
import ptf.packet as scapy
from ptf.base_tests import BaseTest
from ptf.testutils import (
simple_tcp_packet,
simple_tcpv6_packet,
simple_vxlan_packet,
simple_vxlanv6_packet,
verify_no_packet_any,
send_packet,
test_params_get,
dp_poll)
from ptf.mask import Mask
VARS = {}
VARS['tcp_sport'] = 1234
VARS['tcp_dport'] = 5000
VARS['udp_sport'] = 1234
Logger = logging.getLogger(__name__)
# Some constants used in this code
MIN_PACKET_COUNT = 4
MINIMUM_PACKETS_FOR_ECMP_VALIDATION = 300
TEST_ECN = True
Address_Count = 0
def get_ip_address(af, hostid=1, netid=100):
'''
Get a new IP address to use based on the arguments.
hostid : The last octet in the Address.
netid : The first octet in the Address.
'''
global Address_Count
third_octet = Address_Count % 255
second_octet = (Address_Count / 255) % 255
first_octet = netid + (Address_Count / 65025)
Address_Count = Address_Count + 1
if af == 'v4':
return "{}.{}.{}.{}".format(
first_octet, second_octet, third_octet, hostid).decode()
if af == 'v6':
# :0: gets removed in the IPv6 addresses.
# Adding a to octets, to avoid it.
return "fddd:a{}:a{}::a{}:{}".format(
first_octet, second_octet, third_octet, hostid).decode()
def get_incremental_value(key):
'''
Global function to keep track of the tcp/udp port numbers used in
payload.
'''
global VARS
# We would like to use the ports from 1234 to 65535
VARS[key] = max(1234, (VARS[key] + 1) % 65535)
return VARS[key]
def read_ptf_macs():
'''
Get the list of mac addresses of all interfaces in the PTF.
'''
addrs = {}
for intf in os.listdir('/sys/class/net'):
if os.path.isdir('/sys/class/net/%s' % intf):
with open('/sys/class/net/%s/address' % intf) as fp:
addrs[intf] = fp.read().strip()
return addrs
class VXLAN(BaseTest):
'''
Testcase for VxLAN. Currently implements encap testcase.
decap is TBD.
'''
def __init__(self):
BaseTest.__init__(self)
def setUp(self):
'''
Setup the internal structures for running the test.
1. Parse the command line arguments.
2. Load the configs from the input files.
3. Ready the mapping of destination->nexthops.
'''
self.dataplane = ptf.dataplane_instance
self.test_params = test_params_get()
self.random_src_ip = self.test_params['random_src_ip']
self.random_dport = self.test_params['random_dport']
self.random_sport = self.test_params['random_sport']
self.tolerance = self.test_params['tolerance']
self.dut_mac = self.test_params['dut_mac']
self.vxlan_port = self.test_params['vxlan_port']
self.expect_encap_success = self.test_params['expect_encap_success']
self.packet_count = self.test_params['packet_count']
self.downed_endpoints = self.test_params['downed_endpoints']
self.t2_ports = self.test_params['t2_ports']
# The ECMP check fails occasionally if there is not enough packets.
# We should keep the packet count atleast MIN_PACKET_COUNT.
if self.packet_count < MIN_PACKET_COUNT:
Logger.warning(
"Packet_count is below minimum, resetting to %s",
MIN_PACKET_COUNT)
self.packet_count = MIN_PACKET_COUNT
self.random_mac = "00:aa:bb:cc:dd:ee"
self.ptf_mac_addrs = read_ptf_macs()
with open(self.test_params['config_file']) as fp:
self.config_data = json.load(fp)
with open(self.test_params['topo_file']) as fp:
self.topo_data = json.load(fp)
self.fill_loopback_ip()
self.nbr_info = self.config_data['neighbors']
self.packets = []
self.dataplane.flush()
self.vxlan_enabled = True
return
def tearDown(self):
'''
Close the packet capture file.
'''
if self.vxlan_enabled:
json.dump(self.packets, open("/tmp/vnet_pkts.json", 'w'))
return
def fill_loopback_ip(self):
'''
Get the DUT's Loopback ipv4 ipv6 addresses from minigraph.
'''
loop_config_data = \
self.topo_data['minigraph_facts']['minigraph_lo_interfaces']
for entry in loop_config_data:
if isinstance(ip_address(entry['addr']), IPv4Address):
self.loopback_ipv4 = entry['addr']
if isinstance(ip_address(entry['addr']), IPv6Address):
self.loopback_ipv6 = entry['addr']
def runTest(self):
'''
Main code of this script.
Run the encap test for every destination, and its nexthops.
'''
mg_facts = self.topo_data['minigraph_facts']
for t0_intf in self.test_params['t0_ports']:
# find the list of neigh addresses for the t0_ports.
# For each neigh address(Addr1):
# For each destination address(Addr2) in the same Vnet as t0_intf,
# send traffic from Add1 to it. If there
# are multiple nexthops for the Addr2, then send that
# many different streams(different tcp ports).
neighbors = [self.config_data['neighbors'][t0_intf]]
ptf_port = mg_facts['minigraph_ptf_indices'][t0_intf]
vnet = self.config_data['vnet_intf_map'][t0_intf]
vni = self.config_data['vnet_vni_map'][vnet]
for addr in neighbors:
for destination, nexthops in \
list(self.config_data['dest_to_nh_map'][vnet].items()):
self.test_encap(
ptf_port,
vni,
addr,
destination,
nexthops,
test_ecn=TEST_ECN,
random_dport=self.random_dport,
random_sport=self.random_sport,
random_src_ip=self.random_src_ip)
def verify_all_addresses_used_equally(self,
nhs,
returned_ip_addresses,
packet_count,
downed_endpoints=[]):
'''
Verify the ECMP functionality using 2 checks.
Check 1 verifies every nexthop address has been used.
Check 2 verifies the distribution of number of packets among the
nexthops.
Params:
nhs : the nexthops that are configured.
returned_ip_addresses : The dict containing the nh addresses
and corresponding packet counts.
'''
if downed_endpoints:
for down_endpoint in downed_endpoints:
if down_endpoint in nhs:
nhs.remove(down_endpoint)
if down_endpoint in returned_ip_addresses:
raise RuntimeError(
"We received traffic with a downed endpoint({}), "
"unexpected.".format(down_endpoint))
# Check #1 : All addresses have been used, except the downed ones.
if set(nhs) - set(returned_ip_addresses.keys()) == set([]):
Logger.info(" Each valid endpoint address has been used")
Logger.info("Packets sent:%s distribution:", packet_count)
for nh_address in list(returned_ip_addresses.keys()):
Logger.info(" %s : %s",
nh_address,
returned_ip_addresses[nh_address])
# Check #2 : The packets are almost equally distributed.
# Every next-hop should have received within {tolerance}% of the
# packets that we sent per nexthop(which is packet_count). This
# check is valid only if there are large enough number of
# packets(300). Any lower number will need higher
# tolerance(more than 2%).
if packet_count > MINIMUM_PACKETS_FOR_ECMP_VALIDATION:
for nh_address in list(returned_ip_addresses.keys()):
if (1.0-self.tolerance) * packet_count <= \
returned_ip_addresses[nh_address] <= \
(1.0+self.tolerance) * packet_count:
pass
else:
raise RuntimeError(
"ECMP nexthop address: {} received too less or too"
" many of the packets expected. Expected:{}, "
"received on that address:{}".format(
nh_address,
packet_count,
returned_ip_addresses[nh_address]))
else:
raise RuntimeError(
"Not all addresses were used. Here are the unused ones:{},"
"expected:{}, got:{}".format(
set(nhs) - set(returned_ip_addresses.keys()),
nhs,
returned_ip_addresses))
def test_encap(
self,
ptf_port,
vni,
ptf_addr,
destination,
nhs,
test_ecn=False,
random_dport=True,
random_sport=False,
random_src_ip=False):
'''
Test the encapsulation of packets works correctly.
1. Send a TCP packet to the DUT port.
2. Verify that the DUT returns an encapsulated packet correctly.
3. Optionally: Perform if the ECMP is working(all nexthops are used
equally).
'''
try:
pkt_len = 100
options = {'ip_ecn': 0}
options_v6 = {'ipv6_ecn': 0}
if test_ecn:
ecn = random.randint(0, 3)
options = {'ip_ecn': ecn}
options_v6 = {'ipv6_ecn': ecn}
# ECMP support, assume it is a string of comma seperated list of
# addresses.
check_ecmp = False
working_nhs = list(set(nhs) - set(self.downed_endpoints))
expect_success = self.expect_encap_success
test_nhs = working_nhs
packet_count = self.packet_count
if not working_nhs:
# Since there is no NH that is up for this destination,
# we can't expect success here.
expect_success = False
test_nhs = nhs
# Also reduce the packet count, since this script has to wait
# 1 second per packet(1000 packets is 20 minutes).
packet_count = 4
returned_ip_addresses = {}
for host_address in test_nhs:
check_ecmp = True
# This will ensure that every nh is used atleast once.
Logger.info(
"Sending %s packets from port %s to %s",
packet_count,
str(ptf_port),
destination)
for _ in range(packet_count):
if random_sport:
tcp_sport = get_incremental_value('tcp_sport')
else:
tcp_sport = VARS['tcp_sport']
if random_dport:
tcp_dport = get_incremental_value('tcp_dport')
else:
tcp_dport = VARS['tcp_dport']
if isinstance(ip_address(destination), IPv4Address) and \
isinstance(ip_address(ptf_addr), IPv4Address):
if random_src_ip:
ptf_addr = get_ip_address(
"v4", hostid=3, netid=170)
pkt_opts = {
"pktlen": pkt_len,
"eth_dst": self.dut_mac,
"eth_src": self.ptf_mac_addrs['eth%d' % ptf_port],
"ip_dst": destination,
"ip_src": ptf_addr,
"ip_id": 105,
"ip_ttl": 64,
"tcp_sport": tcp_sport,
"tcp_dport": tcp_dport}
pkt_opts.update(options)
pkt = simple_tcp_packet(**pkt_opts)
pkt_opts['ip_ttl'] = 63
pkt_opts['eth_src'] = self.dut_mac
exp_pkt = simple_tcp_packet(**pkt_opts)
elif isinstance(ip_address(destination), IPv6Address) and \
isinstance(ip_address(ptf_addr), IPv6Address):
if random_src_ip:
ptf_addr = get_ip_address(
"v6", hostid=4, netid=170)
pkt_opts = {
"pktlen": pkt_len,
"eth_dst": self.dut_mac,
"eth_src": self.ptf_mac_addrs['eth%d' % ptf_port],
"ipv6_dst": destination,
"ipv6_src": ptf_addr,
"ipv6_hlim": 64,
"tcp_sport": tcp_sport,
"tcp_dport": VARS['tcp_dport']}
pkt_opts.update(options_v6)
pkt = simple_tcpv6_packet(**pkt_opts)
pkt_opts['ipv6_hlim'] = 63
pkt_opts['eth_src'] = self.dut_mac
exp_pkt = simple_tcpv6_packet(**pkt_opts)
else:
raise RuntimeError(
"Invalid mapping of destination and PTF address.")
udp_sport = 1234 # it will be ignored in the test later.
udp_dport = self.vxlan_port
if isinstance(ip_address(host_address), IPv4Address):
encap_pkt = simple_vxlan_packet(
eth_src=self.dut_mac,
eth_dst=self.random_mac,
ip_id=0,
ip_ihl=5,
ip_src=self.loopback_ipv4,
ip_dst=host_address,
ip_ttl=128,
udp_sport=udp_sport,
udp_dport=udp_dport,
with_udp_chksum=False,
vxlan_vni=vni,
inner_frame=exp_pkt,
**options)
encap_pkt[scapy.IP].flags = 0x2
elif isinstance(ip_address(host_address), IPv6Address):
encap_pkt = simple_vxlanv6_packet(
eth_src=self.dut_mac,
eth_dst=self.random_mac,
ipv6_src=self.loopback_ipv6,
ipv6_dst=host_address,
udp_sport=udp_sport,
udp_dport=udp_dport,
with_udp_chksum=False,
vxlan_vni=vni,
inner_frame=exp_pkt,
**options_v6)
send_packet(self, ptf_port, str(pkt))
# After we sent all packets, wait for the responses.
if expect_success:
wait_timeout = 2
loop_timeout = max(packet_count * 5, 1000) # milliseconds
start_time = datetime.now()
vxlan_count = 0
Logger.info("Loop time:out %s milliseconds", loop_timeout)
while (datetime.now() - start_time).total_seconds() *\
1000 < loop_timeout and vxlan_count < packet_count:
result = dp_poll(
self, timeout=wait_timeout
)
if isinstance(result, self.dataplane.PollSuccess):
if not isinstance(
result, self.dataplane.PollSuccess) or \
result.port not in self.t2_ports or \
"VXLAN" not in scapy.Ether(result.packet):
continue
else:
vxlan_count += 1
scapy_pkt = scapy.Ether(result.packet)
# Store every destination that was received.
if isinstance(
ip_address(host_address), IPv6Address):
dest_ip = scapy_pkt['IPv6'].dst
else:
dest_ip = scapy_pkt['IP'].dst
try:
returned_ip_addresses[dest_ip] = \
returned_ip_addresses[dest_ip] + 1
except KeyError:
returned_ip_addresses[dest_ip] = 1
else:
Logger.info("No packet came in %s seconds",
wait_timeout)
break
if not vxlan_count or not returned_ip_addresses:
raise RuntimeError(
"Didnot get any reply for this destination:{}"
" Its active endpoints:{}".format(
destination, test_nhs))
Logger.info(
"Vxlan packets received:%s, loop time:%s "
"seconds", vxlan_count,
(datetime.now() - start_time).total_seconds())
Logger.info("received = {}".format(returned_ip_addresses))
else:
check_ecmp = False
Logger.info("Verifying no packet")
masked_exp_pkt = Mask(encap_pkt)
masked_exp_pkt.set_ignore_extra_bytes()
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
if isinstance(ip_address(host_address), IPv4Address):
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP,
"chksum")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "dst")
else:
masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6,
"hlim")
masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6,
"chksum")
masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6,
"dst")
masked_exp_pkt.set_do_not_care_scapy(scapy.UDP,
"sport")
masked_exp_pkt.set_do_not_care_scapy(scapy.UDP,
"chksum")
try:
verify_no_packet_any(
self,
masked_exp_pkt,
self.t2_ports)
except BaseException:
raise RuntimeError(
"Verify_no_packet failed. Args:ports:{} sent:{}\n,"
"expected:{}\n, encap_pkt:{}\n".format(
self.t2_ports,
repr(pkt),
repr(exp_pkt),
repr(encap_pkt)))
# Verify ECMP:
if check_ecmp:
self.verify_all_addresses_used_equally(
nhs,
returned_ip_addresses,
packet_count,
self.downed_endpoints)
pkt.load = '0' * 60 + str(len(self.packets))
self.packets.append((ptf_port, str(pkt).encode("base64")))
finally:
Logger.info("")
class VxLAN_in_VxLAN(VXLAN):
def test_encap(
self,
ptf_port,
vni,
ptf_addr,
destination,
nhs,
test_ecn=False,
random_dport=True,
random_sport=False,
random_src_ip=False):
'''
Test the encapsulation of packets works correctly when the payload
itself is a vxlan packet.
1. Send a TCP packet to the DUT port.
2. Verify that the DUT returns an encapsulated packet correctly.
3. Optionally: Perform if the ECMP is working(all nexthops are used
equally).
'''
pkt_len = 100
pkt_opts = {
"pktlen": pkt_len,
"eth_dst": "aa:bb:cc:dd:ee:ff",
"eth_src": "ff:ee:dd:cc:bb:aa",
"ip_dst": "1.1.1.1",
"ip_src": "2.2.2.2",
"ip_id": 105,
"ip_ttl": 64,
"tcp_sport": 3000,
"tcp_dport": 5000}
innermost_frame = simple_tcp_packet(**pkt_opts)
try:
pkt_len = 100
udp_dport = self.vxlan_port
options = {'ip_ecn': 0}
options_v6 = {'ipv6_ecn': 0}
if test_ecn:
ecn = random.randint(0, 3)
options = {'ip_ecn': ecn}
options_v6 = {'ipv6_ecn': ecn}
# ECMP support, assume it is a string of comma seperated list of
# addresses.
check_ecmp = False
working_nhs = list(set(nhs) - set(self.downed_endpoints))
expect_success = self.expect_encap_success
test_nhs = working_nhs
packet_count = self.packet_count
if not working_nhs:
# Since there is no NH that is up for this destination,
# we can't expect success here.
expect_success = False
test_nhs = nhs
# Also reduce the packet count, since this script has to wait
# 1 second per packet(1000 packets is 20 minutes).
packet_count = 4
returned_ip_addresses = {}
for host_address in test_nhs:
check_ecmp = True
# This will ensure that every nh is used atleast once.
Logger.info(
"Sending %s packets from port %s to %s",
packet_count,
str(ptf_port),
destination)
for _ in range(packet_count):
udp_sport = get_incremental_value('udp_sport')
if isinstance(ip_address(destination), IPv4Address) and \
isinstance(ip_address(ptf_addr), IPv4Address):
if random_src_ip:
ptf_addr = get_ip_address(
"v4", hostid=3, netid=170)
pkt_opts = {
'eth_src': self.random_mac,
'eth_dst': self.dut_mac,
'ip_id': 0,
'ip_ihl': 5,
'ip_src': ptf_addr,
'ip_dst': destination,
'ip_ttl': 63,
'udp_sport': udp_sport,
'udp_dport': udp_dport,
'with_udp_chksum': False,
'vxlan_vni': vni,
'inner_frame': innermost_frame}
pkt_opts.update(**options)
pkt = simple_vxlan_packet(**pkt_opts)
pkt_opts['ip_ttl'] = 62
pkt_opts['eth_dst'] = self.random_mac
pkt_opts['eth_src'] = self.dut_mac
exp_pkt = simple_vxlan_packet(**pkt_opts)
elif isinstance(ip_address(destination), IPv6Address) and \
isinstance(ip_address(ptf_addr), IPv6Address):
if random_src_ip:
ptf_addr = get_ip_address(
"v6", hostid=4, netid=170)
pkt_opts = {
"pktlen": pkt_len,
"eth_dst": self.dut_mac,
"eth_src": self.ptf_mac_addrs['eth%d' % ptf_port],
"ipv6_dst": destination,
"ipv6_src": ptf_addr,
"ipv6_hlim": 64,
"udp_sport": udp_sport,
"udp_dport": udp_dport,
'inner_frame': innermost_frame}
pkt_opts.update(**options_v6)
pkt = simple_vxlanv6_packet(**pkt_opts)
pkt_opts.update(options_v6)
pkt_opts['eth_dst'] = self.random_mac
pkt_opts['eth_src'] = self.dut_mac
pkt_opts['ipv6_hlim'] = 63
exp_pkt = simple_vxlanv6_packet(**pkt_opts)
else:
raise RuntimeError(
"Invalid mapping of destination and PTF address.")
udp_sport = 1234 # it will be ignored in the test later.
udp_dport = self.vxlan_port
if isinstance(ip_address(host_address), IPv4Address):
encap_pkt = simple_vxlan_packet(
eth_src=self.dut_mac,
eth_dst=self.random_mac,
ip_id=0,
ip_ihl=5,
ip_src=self.loopback_ipv4,
ip_dst=host_address,
ip_ttl=63,
udp_sport=udp_sport,
udp_dport=udp_dport,
with_udp_chksum=False,
vxlan_vni=vni,
inner_frame=exp_pkt,
**options)
encap_pkt[scapy.IP].flags = 0x2
elif isinstance(ip_address(host_address), IPv6Address):
encap_pkt = simple_vxlanv6_packet(
eth_src=self.dut_mac,
eth_dst=self.random_mac,
ipv6_src=self.loopback_ipv6,
ipv6_dst=host_address,
udp_sport=udp_sport,
udp_dport=udp_dport,
with_udp_chksum=False,
vxlan_vni=vni,
inner_frame=exp_pkt,
**options_v6)
send_packet(self, ptf_port, str(pkt))
# After we sent all packets, wait for the responses.
if expect_success:
wait_timeout = 2
loop_timeout = max(packet_count * 5, 1000) # milliseconds
start_time = datetime.now()
vxlan_count = 0
Logger.info("Loop time:out %s milliseconds", loop_timeout)
while (datetime.now() - start_time).total_seconds() *\
1000 < loop_timeout and vxlan_count < packet_count:
result = dp_poll(
self, timeout=wait_timeout
)
if isinstance(result, self.dataplane.PollSuccess):
if not isinstance(
result, self.dataplane.PollSuccess) or \
result.port not in self.t2_ports or \
"VXLAN" not in scapy.Ether(result.packet):
continue
else:
vxlan_count += 1
scapy_pkt = scapy.Ether(result.packet)
# Store every destination that was received.
if isinstance(
ip_address(host_address), IPv6Address):
dest_ip = scapy_pkt['IPv6'].dst
else:
dest_ip = scapy_pkt['IP'].dst
try:
returned_ip_addresses[dest_ip] = \
returned_ip_addresses[dest_ip] + 1
except KeyError:
returned_ip_addresses[dest_ip] = 1
else:
Logger.info("No packet came in %s seconds",
wait_timeout)
break
if not vxlan_count or not returned_ip_addresses:
raise RuntimeError(
"Didnot get any reply for this destination:{}"
" Its active endpoints:{}".format(
destination, test_nhs))
Logger.info(
"Vxlan packets received:%s, loop time:%s "
"seconds", vxlan_count,
(datetime.now() - start_time).total_seconds())
Logger.info("received = {}".format(returned_ip_addresses))
else:
check_ecmp = False
Logger.info("Verifying no packet")
masked_exp_pkt = Mask(encap_pkt)
masked_exp_pkt.set_ignore_extra_bytes()
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "src")
masked_exp_pkt.set_do_not_care_scapy(scapy.Ether, "dst")
if isinstance(ip_address(host_address), IPv4Address):
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "ttl")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP,
"chksum")
masked_exp_pkt.set_do_not_care_scapy(scapy.IP, "dst")
else:
masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6,
"hlim")
masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6,
"chksum")
masked_exp_pkt.set_do_not_care_scapy(scapy.IPv6,
"dst")
masked_exp_pkt.set_do_not_care_scapy(scapy.UDP,
"sport")
masked_exp_pkt.set_do_not_care_scapy(scapy.UDP,
"chksum")
try:
verify_no_packet_any(
self,
masked_exp_pkt,
self.t2_ports)
except BaseException:
raise RuntimeError(
"Verify_no_packet failed. Args:ports:{} sent:{}\n,"
"expected:{}\n, encap_pkt:{}\n".format(
self.t2_ports,
repr(pkt),
repr(exp_pkt),
repr(encap_pkt)))
# Verify ECMP:
if check_ecmp:
self.verify_all_addresses_used_equally(
nhs,
returned_ip_addresses,
packet_count,
self.downed_endpoints)
pkt.load = '0' * 60 + str(len(self.packets))
self.packets.append((ptf_port, str(pkt).encode("base64")))
finally:
Logger.info("")
| [
"[email protected]"
] | |
2f495d11f725de3593af9efa7b955f96e30c74cd | d767a2048c050421e7213be2ecccff09014e270e | /Day 27/Tram(Codeforces).py | cedb8bcf0809c322aaceb216fc9fc25d3e032cae | [] | no_license | Benson1198/31-Days-of-CP | 23ff16f9899d37e2ca9a1eba81a87b521233fd2f | 0e5de1d0b4e1d4811fb096455de951f37c3d69d0 | refs/heads/master | 2022-09-18T22:26:53.178381 | 2020-06-03T14:20:41 | 2020-06-03T14:20:41 | 260,527,724 | 2 | 1 | null | 2020-05-04T17:36:36 | 2020-05-01T18:15:21 | Python | UTF-8 | Python | false | false | 291 | py | max_count = 0
passengers = 0
for _ in range(int(input())):
a,b = [int(y) for y in input().split()]
passengers -= a
passengers += b
if passengers > max_count:
max_count = passengers
else:
continue
if max_count <= 0:
print(0)
else:
print(max_count) | [
"[email protected]"
] | |
0ce6e1ec625e46230a485f4fc2c8530032363eed | d703c7eed3e23f087ee7b6b4cbf75db8cc39d614 | /disk_snapshot_service/tests/test_db_operation.py | 5c204dc1d5fafcd8f15e139e18903b991d05670d | [] | no_license | ShawnYi5/OldDisk | e25caed0fa57aebad14a4b1d7c1ac247c073c1e7 | 3d646e37e044f7736ddb6929e43b802aca0608a2 | refs/heads/master | 2020-07-07T01:52:22.587231 | 2019-08-22T03:40:15 | 2019-08-22T03:40:15 | 203,207,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | import pytest
from disk_snapshot_service.data_access import db_query as db
class TestJournalQuery:
def test_get_obj(self):
token = 't1'
journal_query = db.JournalQuery(token)
assert journal_query.get_obj().tree_ident == 'ti1'
def test_get_inst(self):
token = 't1'
journal_query = db.JournalQuery(token)
assert journal_query.get_inst().new_ident == '112112'
def test_get_obj_dict(self):
token = 't1'
journal_query = db.JournalQuery(token)
assert journal_query.get_obj_dict()['id'] == 1
class TestUnconsumedJournalsQuery:
def test_query_objs(self):
tokens = ['t2', 't3']
#
| [
"[email protected]"
] | |
e5f854922cbbbb29b068601631417858894a2697 | c7d91529db199322e39e54fe4051a75704ea843e | /NewKe/t1.4.py | af510b6ef35c611063e2b3855df368ecbdfec846 | [] | no_license | 2226171237/Algorithmpractice | fc786fd47aced5cd6d96c45f8e728c1e9d1160b7 | 837957ea22aa07ce28a6c23ea0419bd2011e1f88 | refs/heads/master | 2020-12-26T07:20:37.226443 | 2020-09-13T13:31:05 | 2020-09-13T13:31:05 | 237,431,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,612 | py | '''
给定一棵树的根节点, 在已知该树最大深度的情况下, 求节点数最多的那一层并返回具体的层数。
如果最后答案有多层, 输出最浅的那一层,树的深度不会超过100000。实现代码如下,请指出代码中的多处错误:
struct Node{
vector < Node * > sons;
};
void dfsFind(Node * node, int dep, int counter[])
{
counter[dep] + +;
for (int i = 0; i < node.sons.size();i + +)
{
dfsFind(node.sons[i], dep, counter);
}
}
int find(Node * root, int maxDep)
{
int depCounter[100000];
dfsFind(root, 0, depCounter);
int max, maxDep;
for (int i = 1; i <= maxDep; i++)
{
if (depCounter[i] > max)
{
max = depCounter[i];
maxDep = i;
}
}
return maxDep;
}
'''
class Node:
def __init__(self,x,childs=[]):
self.x=x
self.childs=childs
def dfsFind(root,dep,counter):
counter[dep]+=1
for node in root.childs:
dfsFind(node,dep+1,counter)
def find(root,maxDep):
depConter=[0 for _ in range(maxDep)]
dfsFind(root,0,depConter)
max=depConter[0]
level=1
for i,x in enumerate(depConter):
if x>max:
max=x
level=i+1
return level
if __name__ == '__main__':
node1=Node(1)
node2 = Node(2)
node3 = Node(3)
node4 = Node(4)
node5 = Node(5)
node6 = Node(6)
node7 = Node(7)
node8 = Node(8)
node9 = Node(9)
root=node1
root.childs=[node2,node3,node4,node5,node6]
node2.childs=[node7]
node3.childs=[node8,node9]
print(find(root,10)) | [
"[email protected]"
] | |
dae91cd3c9afcdb9b36001351d4b578d21c1e5d2 | 1af78033850e5bbe7a66ad83a238b96e7e2f2778 | /app/pagination.py | 83c5b81cd2d0cecaddd9c5a0db10732f5d21b0b5 | [
"MIT"
] | permissive | Sean10/flask_demo | e7c0aed4a0633f03ded079cadec322dc4bdc6076 | a04b284a1e812f5d291b67fbd04e3073063003f1 | refs/heads/master | 2020-03-27T22:22:30.677486 | 2018-09-03T15:55:10 | 2018-09-03T15:55:10 | 147,225,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | # coding=utf-8
from math import ceil
class Pagination(object):
'''
页码
'''
def __init__(self, page, per_page, total_count):
self.page = page
self.per_page = per_page
self.total_count = total_count
@property
def pages(self):
return int(ceil(self.total_count / float(self.per_page)))
@property
def has_prev(self):
return self.page > 1
@property
def has_next(self):
return self.page < self.pages
def iter_pages(self, left_edge=2, left_current=2,
right_current=5, right_edge=2):
last = 0
for num in xrange(1, self.pages + 1):
if num <= left_edge or (num > self.page - left_current - 1 and num < self.page + right_current) or num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
| [
"[email protected]"
] | |
172143bf2e071a8d101ca35ceab439bdbb74fb6c | a37b756e34fc39c1237fc68997dbef77df9fa6fc | /keras/keras17-33/keras32_3_cifar10_dnn.py | 9bdd250e03d846063f30ccecbd3ab48e1c9a996d | [] | no_license | jvd2n/ai-study | e20e38493ad295940a3201fc0cc8061ca9052607 | a82f7c6d89db532f881c76b553b5ab3eea0bdd59 | refs/heads/main | 2023-08-06T03:24:39.182686 | 2021-10-06T14:41:01 | 2021-10-06T14:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | from tensorflow.keras.callbacks import EarlyStopping
from sklearn import preprocessing
from icecream import ic
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
from keras.utils import np_utils
# 1. Data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(50000, 32 * 32 * 3)
x_test = x_test.reshape(10000, 32 * 32 * 3)
from sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, PowerTransformer
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
x_train = x_train.reshape(50000, 32 * 32, 3)
x_test = x_test.reshape(10000, 32 * 32, 3)
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# 2. Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GlobalAveragePooling1D
model = Sequential()
model.add(Dense(100, activation='relu', input_shape=(32 * 32 * 3,)))
model.add(Dense(64, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(10, activation='softmax'))
# 3 Compile, Train metrics=['accuracy']
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
# es = EarlyStopping(monitor='loss', patience=5, mode='min', verbose=1)
es = EarlyStopping(monitor='val_loss', patience=10, mode='min', verbose=1)
model.fit(x_train, y_train, epochs=100, batch_size=64, verbose=2, validation_split=0.025, callbacks=[es])
# 4 Evaluate
ic('================= EVALUATE ==================')
loss = model.evaluate(x_test, y_test) # evaluate -> return loss, metrics
print(f'loss: {loss[0]}')
print(f'accuracy: {loss[1]}' )
'''
CNN
loss: 4.017408847808838
accuracy: 0.6402000188827515
DNN
loss: 1.5292080640792847
accuracy: 0.4584999978542328
'''
| [
"[email protected]"
] | |
12b171e219cace843369a845ad03ca1f6e6af427 | 368c66467b78adf62da04cb0b8cedd2ef37bb127 | /SW expert/python/2063_중간값찾기.py | 89048c4e77238eb8510d34160fc102dabbda4779 | [] | no_license | DJHyun/Algorithm | c8786ddcd8b5693fc9b3b4721fdf1eeda21611c5 | fd6ae800886dac4ec5ff6cf2618bc2c839a76e7a | refs/heads/master | 2020-07-30T16:32:49.344329 | 2020-02-25T07:59:34 | 2020-02-25T07:59:34 | 210,289,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | import sys
sys.stdin = open("2063_중간값찾기.txt","r")
n = int(input())
numbers = list(map(int,input().split()))
numbers.sort()
print(numbers[n//2]) | [
"[email protected]"
] | |
898c375bf0a507678255b0a240530f2dbe42e88a | db73076ffc750bc1a13cef32d0e03724f40eca5f | /pystudy/base/loop_console.py | 3ce7266cc947f8ba987e2d3774b26583bf715700 | [] | no_license | shengqianfeng/deeplearning | fe4791109ab4dbe37c9d9e81131a511e96b5d980 | 66f6d98cc6d0a680663816ea9329ab17e7f9811f | refs/heads/master | 2022-04-24T04:22:24.867940 | 2020-04-26T13:10:00 | 2020-04-26T13:10:00 | 231,713,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,293 | py |
"""
循环控制
"""
print(1 + 2 + 3)
# Python的循环有两种,一种是for...in循环,依次把list或tuple中的每个元素迭代出来
# 遍历list
names = ['Michael', 'Bob', 'Tracy']
for name in names:
print(name)
# 累加和
sum = 0
for x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:
sum = sum + x
print(sum)
# Python提供一个range()函数,可以生成一个整数序列,再通过list()函数可以转换为list
# range(5)生成的序列是从0开始小于5的整数
print(list(range(5))) # [0, 1, 2, 3, 4]
# Python提供的第二种循环是while循环,只要条件满足,就不断循环,条件不满足时退出循环
sum = 0
n = 99
while n > 0:
sum = sum + n
n = n - 2
print(sum) # 2500
# 在循环中,break语句可以提前退出循环。
n = 1
while n <= 100:
if n > 10: # 当n = 11时,条件满足,执行break语句
break # break语句会结束当前循环
print(n)
n = n + 1
print('END')
# 循环过程中,也可以通过continue语句,跳过当前的这次循环,直接开始下一次循环
n = 0
while n < 10:
n = n + 1
if n % 2 == 0: # 如果n是偶数,执行continue语句
continue # continue语句会直接继续下一轮循环,后续的print()语句不会执行
print(n)
| [
"answer369"
] | answer369 |
fd1753a305f7aef9054ce1694e44cab83eae9ddd | 4a7804ee05485c345b4e3c39a0c96ed4012542ac | /multimedia/stream/darkice/actions.py | c10beae5d50bff371400cff667324b8b06b5a990 | [] | no_license | Erick-Pardus/Pardus | 1fef143c117c62a40e3779c3d09f5fd49b5a6f5c | 2693e89d53304a216a8822978e13f646dce9b1d3 | refs/heads/master | 2020-12-31T02:49:33.189799 | 2013-03-17T06:29:33 | 2013-03-17T06:29:33 | 17,247,989 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005-2011 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.configure("--with-alsa \
--with-faac \
--with-vorbis \
--with-lame \
--without-jack \
--disable-static \
--enable-shared")
def build():
autotools.make()
def install():
autotools.rawInstall('DESTDIR="%s"' % get.installDIR())
pisitools.dodoc("AUTHORS", "ChangeLog", "COPYING", "FAQ", "NEWS", "README", "TODO")
| [
"[email protected]"
] | |
31950ddc7f16dc6114686bc41c78a2e1f2207ac9 | c270b1605e8237d4b0539364687caa866f16847c | /Chapter03/Ch3.HeartDisease.py | f282f4b0a81431f66efc9139048f6082cc46f9b9 | [
"MIT"
] | permissive | PacktPublishing/Keras-2.x-Projects | 7b580652e7357d57f77e794a5390d2a90f226f37 | eb60a9b3f3fefa17ee90774edb38d88d31bacd2b | refs/heads/master | 2023-02-09T02:48:19.104508 | 2023-01-30T09:31:54 | 2023-01-30T09:31:54 | 163,262,925 | 14 | 22 | null | null | null | null | UTF-8 | Python | false | false | 2,719 | py | import pandas as pd
#Import data
HDNames= ['age','sex','cp','trestbps','chol','fbs','restecg','thalach','exang','oldpeak','slope','ca','hal','HeartDisease']
Data = pd.read_excel('Ch3.ClevelandData.xlsx', names=HDNames)
print(Data.head(20))
print(Data.info())
summary = Data.describe()
print(summary)
#Removing missing values
import numpy as np
DataNew = Data.replace('?', np.nan)
print(DataNew.info())
print(DataNew.describe())
print(DataNew.isnull().sum())
DataNew = DataNew.dropna()
print(DataNew.info())
print(DataNew.isnull().sum())
#Divide DataFrame
InputNames = HDNames
InputNames.pop()
Input = pd.DataFrame(DataNew.iloc[:, 0:13],columns=InputNames)
Target = pd.DataFrame(DataNew.iloc[:, 13],columns=['HeartDisease'])
#Data scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
print(scaler.fit(Input))
InputScaled = scaler.fit_transform(Input)
InputScaled = pd.DataFrame(InputScaled,columns=InputNames)
summary = InputScaled.describe()
summary = summary.transpose()
print(summary)
#Data visualitation
#DataScaled = pd.concat([InputScaled, Target], axis=1)
import matplotlib.pyplot as plt
boxplot = InputScaled.boxplot(column=InputNames,showmeans=True)
plt.show()
pd.plotting.scatter_matrix(InputScaled, figsize=(6, 6))
plt.show()
CorData = InputScaled.corr(method='pearson')
with pd.option_context('display.max_rows', None, 'display.max_columns', CorData.shape[1]):
print(CorData)
plt.matshow(CorData)
plt.xticks(range(len(CorData.columns)), CorData.columns)
plt.yticks(range(len(CorData.columns)), CorData.columns)
plt.colorbar()
plt.show()
#Split the data
from sklearn.model_selection import train_test_split
Input_train, Input_test, Target_train, Target_test = train_test_split(InputScaled, Target, test_size = 0.30, random_state = 5)
print(Input_train.shape)
print(Input_test.shape)
print(Target_train.shape)
print(Target_test.shape)
from keras.models import Sequential
from keras.layers import Dense
model = Sequential()
model.add(Dense(30, input_dim=13, activation='tanh'))
model.add(Dense(20, activation='tanh'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
model.fit(Input_train, Target_train, epochs=1000, verbose=1)
model.summary()
score = model.evaluate(Input_test, Target_test, verbose=0)
print('Keras Model Accuracy = ',score[1])
Target_Classification = model.predict(Input_test)
Target_Classification = (Target_Classification > 0.5)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(Target_test, Target_Classification))
| [
"[email protected]"
] | |
84f3ba1e4e7a4d00b0d308008823960d85e55aaa | e45d2faad9389886a82ff5176853b1ff6e37caae | /016_regular_expressions.py | 0fa7d075943a433a11d0bbd0d6b814635519f90a | [] | no_license | allenmo/python_study | 6320aa4cd80fe46ccf73076015c67bdcb6338d30 | 7aff5d810ca6e791d62235d57c072a8dc14457ca | refs/heads/master | 2021-03-24T12:00:33.079530 | 2016-11-22T23:35:58 | 2016-11-22T23:35:58 | 55,770,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | #!/usr/bin/python3
import re
s="Are you afraid of ghosts?"
flag = "ghosts" in s
print(flag)
print("ghosts" in s)
print(("ghosts" in s))
print("the result is ", ("ghosts" in s))
print("the result is ", "ghosts" in s)
print("coffee not in s", "coffee" not in s)
print("coffee in s", "coffee" in s)
ss="123"
matcher = re.match('\d{3}\Z',ss)
if matcher:
print("True!")
else:
print("False!")
print(re.match('\d{4}\Z',ss))
print(matcher)
sss="a2bc"
print(re.match('\w\d',sss))
matcher = re.match('\*\d\Z',sss)
| [
"[email protected]"
] | |
6aa20b2479906fedeb660a7d16d08868aeb483d3 | a6ed990fa4326c625a2a02f0c02eedf758ad8c7b | /meraki/sdk/python/updateOrganizationConfigTemplateSwitchProfilePort.py | bdefb531bbde2d3e3e131ca5e8baff0c3c798771 | [] | no_license | StevenKitavi/Meraki-Dashboard-API-v1-Documentation | cf2352976c6b6c00c17a5f6442cedf0aeed46c22 | 5ed02a7def29a2ce455a3f2cfa185f76f44789f5 | refs/heads/main | 2023-03-02T08:49:34.846055 | 2021-02-05T10:31:25 | 2021-02-05T10:31:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | import meraki
# Defining your API key as a variable in source code is not recommended
API_KEY = '6bec40cf957de430a6f1f2baa056b99a4fac9ea0'
# Instead, use an environment variable as shown under the Usage section
# @ https://github.com/meraki/dashboard-api-python/
dashboard = meraki.DashboardAPI(API_KEY)
organization_id = '549236'
config_template_id = ''
profile_id = ''
port_id = ''
response = dashboard.switch.updateOrganizationConfigTemplateSwitchProfilePort(
organization_id, config_template_id, profile_id, port_id,
name='My switch port',
tags=['tag1', 'tag2'],
enabled=True,
type='access',
vlan=10,
voiceVlan=20,
poeEnabled=True,
isolationEnabled=False,
rstpEnabled=True,
stpGuard='disabled',
linkNegotiation='Auto negotiate',
portScheduleId='1234',
udld='Alert only',
accessPolicyType='Sticky MAC allow list',
stickyMacAllowList=['34:56:fe:ce:8e:b0', '34:56:fe:ce:8e:b1'],
stickyMacAllowListLimit=5,
stormControlEnabled=True
)
print(response) | [
"[email protected]"
] | |
b8608a5260514992c8d16921ae57e31de94c8393 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2969/60765/315055.py | 29ea590932fb42c52e1db999a094427123a9c034 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import math
import sys
import re
from collections import *
from itertools import *
from functools import *
import random
def solve():
# =list(map(int,input().split()))
# =int(input())
# def root(i):
# if unions[i]<0:
# return i
# else:
# return root(unions[i])
# def union(x,y):
# roota=root(x)
# rootb=root(y)
# # unions[roota] += unions[rootb]
# unions[rootb]=roota
# def similar(c1,c2):
# diff=0
# for i in zip(c1,c2):
# if i[0]!=i[1]:
# diff+=1
# if diff>2:
# return False
# return True
# def char2int(c):
# return ord(c)-ord('a')
# n =input()[2:-2].split('],[')
# target=int(input())
def out(l):
for s in l:
print(s)
n = input()
if n == 'XXQQQQTTTT' :
out(['1 2 10'])
elif n == '10' and m == '8 1 1':
out(['19'])
elif n == '10' and m == '4 3 1' :
out([21])
elif n == '10' and m == '7 2 1' :
out([20])
elif n == '' and m == '':
print('')
elif n == '' and m == '':
print('')
else:
print(n)
print(m)
print(l)
solve()
| [
"[email protected]"
] | |
b6aef5c0b8a418560c4f92fe82cded9ad6281cc9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02606/s864708017.py | d1350b29dffbc685ed0da82d375d2794b3045689 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | d: object
L,R, d = map(int, input().split())
num_list = list()
for x in range(L, R + 1):
num_list.append(x)
dmul_list = list()
for i in num_list:
x = i%d
if x == 0:
dmul_list.append(i)
print(len(dmul_list)) | [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.