hexsha (string, 40 chars) | size (int64, 1 to 1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, 3 to 239 chars) | max_stars_repo_name (string, 5 to 130 chars) | max_stars_repo_head_hexsha (string, 40 to 78 chars) | max_stars_repo_licenses (sequence, 1 to 10 items) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24 chars, nullable) | max_stars_repo_stars_event_max_datetime (string, 24 chars, nullable) | max_issues_repo_path (string, 3 to 239 chars) | max_issues_repo_name (string, 5 to 130 chars) | max_issues_repo_head_hexsha (string, 40 to 78 chars) | max_issues_repo_licenses (sequence, 1 to 10 items) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24 chars, nullable) | max_issues_repo_issues_event_max_datetime (string, 24 chars, nullable) | max_forks_repo_path (string, 3 to 239 chars) | max_forks_repo_name (string, 5 to 130 chars) | max_forks_repo_head_hexsha (string, 40 to 78 chars) | max_forks_repo_licenses (sequence, 1 to 10 items) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24 chars, nullable) | max_forks_repo_forks_event_max_datetime (string, 24 chars, nullable) | content (string, 1 to 1.03M chars) | avg_line_length (float64, 1 to 958k) | max_line_length (int64, 1 to 1.03M) | alphanum_fraction (float64, 0 to 1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7943f89ce7024f29af215e2f4e120839cce8c7d7 | 276 | py | Python | Source Code/example/function.py | Amoswuuuu/Python-foundation-HNU | 2a30c18fd03f453197c902eeec537d4ab0bdaaad | ["Apache-2.0"] | 1 | 2019-05-12T06:02:20.000Z | 2019-05-12T06:02:20.000Z | Source Code/example/function.py | Amoswuuuu/Python-foundation-HNU | 2a30c18fd03f453197c902eeec537d4ab0bdaaad | ["Apache-2.0"] | null | null | null | Source Code/example/function.py | Amoswuuuu/Python-foundation-HNU | 2a30c18fd03f453197c902eeec537d4ab0bdaaad | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author :AmosWu
# Date :2019/1/26
# Features : function
def func(x):
return x*x
def main():
num = int(input("Please input a number:"))
result = func(num)
print(result)
if __name__ == '__main__':
main()
| 18.4 | 46 | 0.57971 |
7943f94af3200bbe0a1a6c62342686ef7519482b | 5,083 | py | Python | deluca/agents/_deep.py | google/deluca | 626ade7bfa44afc52e6ffb9a9e6e94258b4dc024 | ["Apache-2.0"] | 105 | 2020-12-10T05:08:28.000Z | 2022-03-17T03:53:10.000Z | deluca/agents/_deep.py | google/deluca | 626ade7bfa44afc52e6ffb9a9e6e94258b4dc024 | ["Apache-2.0"] | 36 | 2020-12-11T11:47:36.000Z | 2021-08-25T09:40:07.000Z | deluca/agents/_deep.py | google/deluca | 626ade7bfa44afc52e6ffb9a9e6e94258b4dc024 | ["Apache-2.0"] | 16 | 2020-12-10T16:43:31.000Z | 2022-03-17T02:02:18.000Z |
# Copyright 2021 The Deluca Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""deluca.agents._deep"""
from numbers import Real
import jax
import jax.numpy as jnp
from deluca.agents.core import Agent
from deluca.utils import Random
class Deep(Agent):
"""
Generic deep controller that uses zero-order methods to train on an
environment.
"""
def __init__(
self,
env_state_size,
action_space,
learning_rate: Real = 0.001,
gamma: Real = 0.99,
max_episode_length: int = 500,
seed: int = 0,
) -> None:
"""
Description: initializes the Deep agent
Args:
            env_state_size (int): size of the environment state vector
            action_space: iterable of available actions
learning_rate (Real):
gamma (Real):
max_episode_length (int):
seed (int):
Returns:
None
"""
# Create gym and seed numpy
self.env_state_size = int(env_state_size)
self.action_space = action_space
self.max_episode_length = max_episode_length
self.lr = learning_rate
self.gamma = gamma
self.random = Random(seed)
self.reset()
def reset(self) -> None:
"""
Description: reset agent
Args:
None
Returns:
None
"""
# Init weight
self.W = jax.random.uniform(
self.random.generate_key(),
shape=(self.env_state_size, len(self.action_space)),
minval=0,
maxval=1,
)
# Keep stats for final print of graph
self.episode_rewards = []
self.current_episode_length = 0
self.current_episode_reward = 0
self.episode_rewards = jnp.zeros(self.max_episode_length)
self.episode_grads = jnp.zeros((self.max_episode_length, self.W.shape[0], self.W.shape[1]))
# dummy values for attrs, needed to inform scan of traced shapes
self.state = jnp.zeros((self.env_state_size,))
self.action = self.action_space[0]
ones = jnp.ones((len(self.action_space),))
self.probs = ones * 1 / jnp.sum(ones)
def policy(self, state: jnp.ndarray, w: jnp.ndarray) -> jnp.ndarray:
"""
Description: Policy that maps state to action parameterized by w
Args:
state (jnp.ndarray):
w (jnp.ndarray):
"""
z = jnp.dot(state, w)
exp = jnp.exp(z)
return exp / jnp.sum(exp)
def softmax_grad(self, softmax: jnp.ndarray) -> jnp.ndarray:
"""
Description: Vectorized softmax Jacobian
Args:
softmax (jnp.ndarray)
"""
s = softmax.reshape(-1, 1)
return jnp.diagflat(s) - jnp.dot(s, s.T)
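    # Worked example (added for illustration; not part of the original deluca code):
    # for softmax probabilities s = [0.7, 0.3] the Jacobian computed above is
    #   diag(s) - s @ s.T = [[ 0.21, -0.21],
    #                        [-0.21,  0.21]]
    # since 0.7 - 0.7**2 = 0.21 and -0.7 * 0.3 = -0.21.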
def __call__(self, state: jnp.ndarray):
"""
Description: provide an action given a state
Args:
state (jnp.ndarray):
Returns:
jnp.ndarray: action to take
"""
self.state = state
self.probs = self.policy(state, self.W)
self.action = jax.random.choice(
self.random.generate_key(), a=self.action_space, p=self.probs
)
return self.action
def feed(self, reward: Real) -> None:
"""
Description: compute gradient and save with reward in memory for weight updates
Args:
reward (Real):
Returns:
None
"""
dsoftmax = self.softmax_grad(self.probs)[self.action, :]
dlog = dsoftmax / self.probs[self.action]
grad = self.state.reshape(-1, 1) @ dlog.reshape(1, -1)
self.episode_rewards = self.episode_rewards.at[self.current_episode_length].set(reward)
self.episode_grads = self.episode_grads.at[self.current_episode_length].set(grad)
self.current_episode_length += 1
def update(self) -> None:
"""
Description: update weights
Args:
None
Returns:
None
"""
        for i in range(self.current_episode_length):
            # Loop through everything that happened in the episode and update
            # towards the log policy gradient times the discounted **FUTURE** reward
            future_reward = jnp.sum(
                jnp.array(
                    [
                        r * (self.gamma ** t)
                        for t, r in enumerate(
                            self.episode_rewards[i : self.current_episode_length]
                        )
                    ]
                )
            )
            self.W += self.lr * self.episode_grads[i] * future_reward
        # reset episode length
        self.current_episode_length = 0
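# Hedged usage sketch (added for illustration; not part of the original module).
# It assumes an `env` object exposing `reset() -> state` and
# `step(action) -> (state, reward, done)`; both are assumptions, not deluca API:
#
#   agent = Deep(env_state_size=4, action_space=jnp.array([0, 1]))
#   state = env.reset()
#   for _ in range(agent.max_episode_length):
#       action = agent(state)
#       state, reward, done = env.step(int(action))
#       agent.feed(reward)
#       if done:
#           break
#   agent.update()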
| 28.717514 | 99 | 0.577612 |
7943fafcfb5928444df4af24a274674035c5ec35 | 5,711 | py | Python | pycls/models/nas_bench/graph_util.py | AlbertiPot/unnas | f0df44634ae300a52adc25b7fb24f4b089655680 | ["MIT"] | 93 | 2020-08-03T01:40:17.000Z | 2021-12-14T02:58:09.000Z | pycls/models/nas_bench/graph_util.py | AlbertiPot/unnas | f0df44634ae300a52adc25b7fb24f4b089655680 | ["MIT"] | 4 | 2020-08-24T10:53:30.000Z | 2021-08-12T20:30:47.000Z | pycls/models/nas_bench/graph_util.py | AlbertiPot/unnas | f0df44634ae300a52adc25b7fb24f4b089655680 | ["MIT"] | 11 | 2020-08-03T20:59:17.000Z | 2022-03-03T00:48:01.000Z |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions used by generate_graph.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import itertools
import numpy as np
def gen_is_edge_fn(bits):
"""Generate a boolean function for the edge connectivity.
Given a bitstring FEDCBA and a 4x4 matrix, the generated matrix is
[[0, A, B, D],
[0, 0, C, E],
[0, 0, 0, F],
[0, 0, 0, 0]]
Note that this function is agnostic to the actual matrix dimension due to
order in which elements are filled out (column-major, starting from least
significant bit). For example, the same FEDCBA bitstring (0-padded) on a 5x5
matrix is
[[0, A, B, D, 0],
[0, 0, C, E, 0],
[0, 0, 0, F, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
Args:
bits: integer which will be interpreted as a bit mask.
Returns:
vectorized function that returns True when an edge is present.
"""
def is_edge(x, y):
"""Is there an edge from x to y (0-indexed)?"""
if x >= y:
return 0
# Map x, y to index into bit string
index = x + (y * (y - 1) // 2)
return (bits >> index) % 2 == 1
return np.vectorize(is_edge)
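# Illustrative sketch (added for clarity; not part of the original module):
#   >>> m = np.fromfunction(gen_is_edge_fn(0b000011), (4, 4), dtype=np.int8)
#   >>> bool(m[0, 1]), bool(m[0, 2]), bool(m[1, 2]), bool(m[0, 3])
#   (True, True, False, False)
# i.e. with bits 0b000011 only A (0 -> 1) and B (0 -> 2) are present, matching
# the column-major, least-significant-bit-first fill described in the docstring.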
def is_full_dag(matrix):
"""Full DAG == all vertices on a path from vert 0 to (V-1).
i.e. no disconnected or "hanging" vertices.
It is sufficient to check for:
1) no rows of 0 except for row V-1 (only output vertex has no out-edges)
2) no cols of 0 except for col 0 (only input vertex has no in-edges)
Args:
matrix: V x V upper-triangular adjacency matrix
Returns:
    True if there are no dangling vertices.
"""
shape = np.shape(matrix)
rows = matrix[:shape[0]-1, :] == 0
rows = np.all(rows, axis=1) # Any row with all 0 will be True
rows_bad = np.any(rows)
cols = matrix[:, 1:] == 0
cols = np.all(cols, axis=0) # Any col with all 0 will be True
cols_bad = np.any(cols)
return (not rows_bad) and (not cols_bad)
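# Example sketch (added for clarity; not part of the original module):
#   >>> chain = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
#   >>> is_full_dag(chain)
#   True
# Zeroing the (1, 2) edge leaves vertex 1 with no out-edges ("hanging"), so the
# all-zero-row check then makes is_full_dag return False.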
def num_edges(matrix):
"""Computes number of edges in adjacency matrix."""
return np.sum(matrix)
def hash_module(matrix, labeling):
"""Computes a graph-invariance MD5 hash of the matrix and label pair.
Args:
matrix: np.ndarray square upper-triangular adjacency matrix.
labeling: list of int labels of length equal to both dimensions of
matrix.
Returns:
MD5 hash of the matrix and labeling.
"""
vertices = np.shape(matrix)[0]
in_edges = np.sum(matrix, axis=0).tolist()
out_edges = np.sum(matrix, axis=1).tolist()
assert len(in_edges) == len(out_edges) == len(labeling)
hashes = list(zip(out_edges, in_edges, labeling))
hashes = [hashlib.md5(str(h).encode('utf-8')).hexdigest() for h in hashes]
# Computing this up to the diameter is probably sufficient but since the
# operation is fast, it is okay to repeat more times.
for _ in range(vertices):
new_hashes = []
for v in range(vertices):
in_neighbors = [hashes[w] for w in range(vertices) if matrix[w, v]]
out_neighbors = [hashes[w] for w in range(vertices) if matrix[v, w]]
new_hashes.append(hashlib.md5(
(''.join(sorted(in_neighbors)) + '|' +
''.join(sorted(out_neighbors)) + '|' +
hashes[v]).encode('utf-8')).hexdigest())
hashes = new_hashes
fingerprint = hashlib.md5(str(sorted(hashes)).encode('utf-8')).hexdigest()
return fingerprint
def permute_graph(graph, label, permutation):
"""Permutes the graph and labels based on permutation.
Args:
graph: np.ndarray adjacency matrix.
label: list of labels of same length as graph dimensions.
permutation: a permutation list of ints of same length as graph dimensions.
Returns:
np.ndarray where vertex permutation[v] is vertex v from the original graph
"""
# vertex permutation[v] in new graph is vertex v in the old graph
forward_perm = zip(permutation, list(range(len(permutation))))
inverse_perm = [x[1] for x in sorted(forward_perm)]
edge_fn = lambda x, y: graph[inverse_perm[x], inverse_perm[y]] == 1
new_matrix = np.fromfunction(np.vectorize(edge_fn),
(len(label), len(label)),
dtype=np.int8)
new_label = [label[inverse_perm[i]] for i in range(len(label))]
return new_matrix, new_label
def is_isomorphic(graph1, graph2):
"""Exhaustively checks if 2 graphs are isomorphic."""
matrix1, label1 = np.array(graph1[0]), graph1[1]
matrix2, label2 = np.array(graph2[0]), graph2[1]
assert np.shape(matrix1) == np.shape(matrix2)
assert len(label1) == len(label2)
vertices = np.shape(matrix1)[0]
# Note: input and output in our constrained graphs always map to themselves
# but this script does not enforce that.
for perm in itertools.permutations(range(0, vertices)):
pmatrix1, plabel1 = permute_graph(matrix1, label1, perm)
if np.array_equal(pmatrix1, matrix2) and plabel1 == label2:
return True
return False
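# Hedged usage sketch (added for illustration; not part of the original module):
# permuting a graph's vertices with permute_graph preserves isomorphism, and
# hash_module is likewise designed so that isomorphic (matrix, label) pairs get
# the same fingerprint, e.g.
#   >>> g = np.array([[0, 1, 1], [0, 0, 1], [0, 0, 0]])
#   >>> labels = [-1, 0, -2]
#   >>> pg, pl = permute_graph(g, labels, [0, 2, 1])
#   >>> is_isomorphic((g, labels), (pg, pl))
#   True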
| 33.011561 | 79 | 0.67834 |
7943fc32206562bdcf159c03a25938e52a1b5268 | 474 | py | Python | MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 2/4. Functions/gcd_iter.py | henriqueumeda/-Python-study | 28e93a377afa4732037a29eb74d4bc7c9e24b62f | ["MIT"] | null | null | null | MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 2/4. Functions/gcd_iter.py | henriqueumeda/-Python-study | 28e93a377afa4732037a29eb74d4bc7c9e24b62f | ["MIT"] | null | null | null | MIT/600.1x - Introduction to Computer Science and Programming Using Python/Unit 2/4. Functions/gcd_iter.py | henriqueumeda/-Python-study | 28e93a377afa4732037a29eb74d4bc7c9e24b62f | ["MIT"] | null | null | null |
def gcdIter(a, b):
'''
a, b: positive integers
returns: a positive integer, the greatest common divisor of a & b.
'''
# Your code here
gcd = 1
if a > b:
for i in range(b, 0, -1):
if a % i == 0 and b % i == 0:
gcd = i
break
else:
for i in range(a, 0, -1):
if a % i == 0 and b % i == 0:
gcd = i
break
return gcd
print(gcdIter(2, 12))
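# Expected output: 2 -- the else branch counts down from a = 2, and 2 divides both 2 and 12.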
| 21.545455 | 70 | 0.415612 |
7943fc76de95a4a3c2582929c731a6c531f73167 | 354 | py | Python | op_webgui/urls.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | ["MIT"] | 5 | 2016-10-31T17:46:17.000Z | 2022-02-02T00:40:49.000Z | op_webgui/urls.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | ["MIT"] | 33 | 2018-05-09T06:07:50.000Z | 2021-09-22T17:39:56.000Z | op_webgui/urls.py | lkmhaqer/gtools-python | cff6d80525b78a4fadfb686566489fbe1687d889 | ["MIT"] | 1 | 2020-05-14T21:44:25.000Z | 2020-05-14T21:44:25.000Z |
# file: op_webgui/urls.py
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from op_webgui import views
app_name = 'op_webgui'
urlpatterns = [
url(
r'^$',
views.index,
name='index'
),
url(
r'^router/$',
views.router_list,
name='router_list'
),
]
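# Usage sketch (added for illustration; assumes the project-level URLconf includes
# this module at the site root, which is not shown here): with app_name = 'op_webgui',
# reverse('op_webgui:index') resolves to '/' and
# reverse('op_webgui:router_list') resolves to '/router/'.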
| 16.857143 | 51 | 0.581921 |
7943fd2d92e4b7d07753619ad44d1bc7c6f33f6f | 215 | py | Python | pyf/_has_setuid.py | snoopyjc/pythonizer | 6b3683084f41f0aa06b1b4e652a0f00b19cceac1 | ["Artistic-2.0"] | 1 | 2022-03-13T22:08:25.000Z | 2022-03-13T22:08:25.000Z | pyf/_has_setuid.py | snoopyjc/pythonizer | 6b3683084f41f0aa06b1b4e652a0f00b19cceac1 | ["Artistic-2.0"] | 21 | 2022-03-17T16:53:04.000Z | 2022-03-31T23:55:24.000Z | pyf/_has_setuid.py | snoopyjc/pythonizer | 6b3683084f41f0aa06b1b4e652a0f00b19cceac1 | ["Artistic-2.0"] | null | null | null |
import os
import stat


def _has_setuid(path): # -u
if not path:
return False
if hasattr(path, '_mode'):
return (path._mode & stat.S_ISUID) != 0
return (os.stat(path).st_mode & stat.S_ISUID) != 0
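# Usage sketch (added for illustration; not part of the original file): after
# os.chmod(p, 0o4755) the setuid bit (stat.S_ISUID == 0o4000) is set, so
# _has_setuid(p) returns True; for a plain 0o644 file it returns False.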
| 26.875 | 55 | 0.553488 |
7943fdef7d4903f3a2541ac4f6c43297a3075436 | 1,900 | py | Python | Adelphi Academic Calendar/date_testing.py | EnriqueGambra/Amazon-Alexa-Skill | 198ed51bef555eee006041fef0bcbf5c955142d5 | ["MIT"] | null | null | null | Adelphi Academic Calendar/date_testing.py | EnriqueGambra/Amazon-Alexa-Skill | 198ed51bef555eee006041fef0bcbf5c955142d5 | ["MIT"] | null | null | null | Adelphi Academic Calendar/date_testing.py | EnriqueGambra/Amazon-Alexa-Skill | 198ed51bef555eee006041fef0bcbf5c955142d5 | ["MIT"] | 1 | 2019-10-11T17:15:20.000Z | 2019-10-11T17:15:20.000Z |
from datetime import date
from datetime import timedelta
import json
filename = "tmp/adelphi_calendar2.json"
today = date.today()
event = "matriculation day"
with open(filename) as f:
calendar_info_dict = json.load(f)
future_date = calendar_info_dict[event]
month_day_year = future_date.split(" ")
month_num_dict = {
'january': 1,
'february': 2,
'march': 3,
'april': 4,
'may': 5,
'june': 6,
'july': 7,
'august': 8,
'september': 9,
'october': 10,
'november': 11,
'december': 12
}
month = month_num_dict.get(month_day_year[0])
day = month_day_year[1]
year = month_day_year[2]
# future = date(int(year), month, int(day))
# print(future)
# days_until = future - today
# print(days_until)
# if(str(days_until).find('-')) is not -1:
# print("event passed")
num_month_dict = {
'01': 'january',
'02': 'february',
'03': 'march',
'04': 'april',
'05': 'may',
'06': 'june',
'07': 'july',
'08': 'august',
'09': 'september',
'10': 'october',
'11': 'november',
'12': 'december'
}
date_event_dict = dict()
for key, value in calendar_info_dict.items():
date_event_dict[value] = key
# print(date_event_dict)
day_counter = 0
next_event = None  # fallback so the print below does not raise NameError if no event is found
while True:
next_date = today + timedelta(days=day_counter)
year_month_day = str(next_date).split("-")
year = year_month_day[0]
month = year_month_day[1]
day = year_month_day[2]
converted_date = f'{num_month_dict.get(month)} {day} {year}'
if converted_date in date_event_dict:
next_event = date_event_dict[converted_date]
break
elif int(year) > 2024:
break
day_counter += 1
print(next_event)
event_date = calendar_info_dict['spring break']
event_date_list = event_date.split('-')
month_day = event_date_list[0]
year = event_date[len(event_date) - 4:]
month_day_year = month_day + " " + year
print(month_day_year)
| 22.093023 | 64 | 0.643158 |
7943fe4554217d9a6da9e719169e7b1be9d1d96b | 13,584 | py | Python | closed/Alibaba/code/common/system_list.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | ["Apache-2.0"] | 12 | 2021-09-23T08:05:57.000Z | 2022-03-21T03:52:11.000Z | closed/Alibaba/code/common/system_list.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | ["Apache-2.0"] | 11 | 2021-09-23T20:34:06.000Z | 2022-01-22T07:58:02.000Z | closed/Alibaba/code/common/system_list.py | ctuning/inference_results_v1.1 | d9176eca28fcf6d7a05ccb97994362a76a1eb5ab | ["Apache-2.0"] | 16 | 2021-09-23T20:26:38.000Z | 2022-03-09T12:59:56.000Z |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum, unique
from collections import OrderedDict
import re
import subprocess
import os
NVIDIA_SMI_GPU_REGEX = re.compile(r"GPU (\d+): ([\w\- ]+) \(UUID: (GPU-[0-9a-f\-]+)\)")
"""
re.Pattern: Regex to match nvidia-smi output for GPU information
match(1) - GPU index
match(2) - GPU name
match(3) - GPU UUID
"""
NVIDIA_SMI_MIG_REGEX = re.compile(
r"\s+MIG\s+(\d+)g.(\d+)gb\s+Device\s+(\d+):\s+\(UUID:\s+(MIG-[0-9a-f\-]+)\)"
)
"""
re.Pattern: Regex to match nvidia-smi output for MIG information
match(1) - Number of GPCs in the MIG slice
match(2) - Allocated video memory capacity in the MIG slice in GB
match(3) - MIG device ID
match(4) - MIG instance UUID
"""
@unique
class Architecture(Enum):
Turing = "Turing"
Xavier = "Xavier"
Ampere = "Ampere"
Intel_CPU_x86_64 = "Intel CPU x86_64"
Unknown = "Unknown"
class MIGSlice(object):
def __init__(self, num_gpcs, mem_gb, device_id=None, uuid=None):
"""
Describes a MIG instance. If optional arguments are set, then this MIGSlice describes an active MIG instance. If
optional arguments are not set, then this MIGSlice describes an uninstantiated, but supported MIG instance.
Arguments:
num_gpcs: Number of GPCs in this MIG slice
mem_gb: Allocated video memory capacity in this MIG slice in GB
Optional arguments:
device_id: Device ID of the GPU this MIG is a part of
uuid: UUID of this MIG instance
"""
self.num_gpcs = num_gpcs
self.mem_gb = mem_gb
self.device_id = device_id
self.uuid = uuid
# One cannot be set without the other.
assert (device_id is None) == (uuid is None)
def __str__(self):
return "{:d}g.{:d}gb".format(self.num_gpcs, self.mem_gb)
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
def is_active_slice(self):
        return self.device_id is not None  # per the __init__ docstring, active slices carry a device_id/uuid
def get_gpu_uuid(self):
return self.uuid.split("/")[0][4:] # First 4 characters are "MIG-"
def get_gpu_instance_id(self):
return int(self.uuid.split("/")[1])
def get_compute_instance_id(self):
return int(self.uuid.split("/")[2])
class MIGConfiguration(object):
def __init__(self, conf):
"""
Stores information about a system's MIG configuration.
conf: An OrderedDict of gpu_id -> { MIGSlice -> Count }
"""
self.conf = conf
def check_compatible(self, valid_mig_slices):
"""
Given a list of valid MIGSlices, checks if this MIGConfiguration only contains MIGSlices that are described in
the list.
"""
m = {str(mig) for mig in valid_mig_slices}
for gpu_id in self.conf:
for mig in self.conf[gpu_id]:
if str(mig) not in m:
return False
return True
def num_gpus(self):
"""
Returns the number of GPUs with active MIG instances in this MIGConfiguration.
"""
return len(self.conf)
def num_mig_slices(self):
"""
Returns the number of total active MIG instances across all GPUs in this MIGConfiguration
"""
i = 0
for gpu_id in self.conf:
for mig in self.conf[gpu_id]:
i += self.conf[gpu_id][mig]
return i
def __str__(self):
"""
Returns a string that describes this MIG configuration.
Examples:
- For 1x 1-GPC: 1x1g.10gb
        - For 1x 1-GPC, 2x 2-GPC, and 1x 3-GPC: 1x1g.10gb_2x2g.20gb_1x3g.30gb
"""
# Add up the slices on each GPU by MIGSlice
flattened = OrderedDict()
for gpu_id in self.conf:
for mig in self.conf[gpu_id]:
if mig not in flattened:
flattened[mig] = 0
flattened[mig] += self.conf[gpu_id][mig]
        return "_".join(sorted(["{}x{}".format(flattened[mig], str(mig)) for mig in flattened]))
@staticmethod
def get_gpu_mig_slice_mapping():
"""
Returns a dict containing mapping between gpu uuid and list of mig slices on that gpu.
"""
p = subprocess.Popen("nvidia-smi -L", universal_newlines=True, shell=True, stdout=subprocess.PIPE)
gpu_mig_slice_mapping = dict()
for line in p.stdout:
gpu_match = NVIDIA_SMI_GPU_REGEX.match(line)
if gpu_match is not None:
gpu_uuid = gpu_match.group(3)
gpu_mig_slice_mapping[gpu_uuid] = []
mig_match = NVIDIA_SMI_MIG_REGEX.match(line)
if mig_match is not None:
gpu_mig_slice_mapping[gpu_uuid].append(MIGSlice(
int(mig_match.group(1)), # num_gpcs
int(mig_match.group(2)), # mem_gb
int(mig_match.group(3)), # device_id
mig_match.group(4) # uuid
))
return gpu_mig_slice_mapping
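    # Illustrative sketch (added for clarity; not part of the original file): the
    # `nvidia-smi -L` lines parsed above look roughly like
    #   GPU 0: A100-SXM4-40GB (UUID: GPU-0123abcd-0123-abcd-0123-0123456789ab)
    #     MIG 1g.5gb      Device  0: (UUID: MIG-0123abcd-0123-abcd-0123-0123456789ab)
    # NVIDIA_SMI_GPU_REGEX.match() on the first line captures the index, name and
    # UUID; NVIDIA_SMI_MIG_REGEX.match() on the second captures the GPC count,
    # memory size, device id and MIG UUID.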
@staticmethod
def from_nvidia_smi():
visible_gpu_instances = set()
if os.environ.get("CUDA_VISIBLE_DEVICES"):
for g in os.environ.get("CUDA_VISIBLE_DEVICES").split(","):
if g.startswith("MIG"):
visible_gpu_instances.add(g)
gpu_mig_slice_mapping = MIGConfiguration.get_gpu_mig_slice_mapping()
conf = OrderedDict()
for gpu, mig_slices in gpu_mig_slice_mapping.items():
conf[gpu] = {}
for mig_slice in mig_slices:
if (not os.environ.get("CUDA_VISIBLE_DEVICES")) or (
mig_slice.uuid in visible_gpu_instances
):
if mig_slice not in conf[gpu]:
conf[gpu][mig_slice] = 0
conf[gpu][mig_slice] += 1
return MIGConfiguration(conf)
class System(object):
"""
System class contains information on the GPUs used in our submission systems.
gpu: ID of the GPU being used
pci_id: PCI ID of the GPU
arch: Architecture of the GPU
count: Number of GPUs used on the system
mig_conf: MIG configuration (if applicable)
"""
def __init__(self, gpu, arch, count, pci_id=None, mig_conf=None):
self.gpu = gpu
self.arch = arch
self.count = count
self.mig_conf = mig_conf
self.pci_id = pci_id
self.uses_mig = mig_conf is not None
def get_id(self):
sid = f"{self.gpu}x{self.count}" if "Xavier" not in self.gpu else self.gpu
if self.mig_conf is not None:
sid = "{:}-MIG_{:}".format(self.gpu, str(self.mig_conf))
return sid
def __str__(self):
return self.get_id()
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
class SystemClass(object):
def __init__(self, gpu, aliases, pci_ids, arch, supported_counts, valid_mig_slices=None):
"""
SystemClass describes classes of submissions systems with different variations. SystemClass objects are
hardcoded as supported systems and must be defined in KnownSystems below to be recognized a valid system for the
pipeline.
Args:
gpu: ID of the GPU being used, usually the name reported by nvidia-smi
aliases: Different names of cards reported by nvidia-smi that use the same SKUs, i.e. Titan RTX and Quadro
RTX 8000
pci_ids: PCI IDs of cards that match this system configuration that use the same SKUs
arch: Architecture of the GPU
supported_counts: Counts of GPUs for supported multi-GPU systems, i.e. [1, 2, 4] to support 1x, 2x, and 4x
GPU systems
valid_mig_slices: List of supported MIGSlices. None if MIG not supported.
"""
self.gpu = gpu
self.aliases = aliases
self.pci_ids = pci_ids
self.arch = arch
self.supported_counts = supported_counts
self.valid_mig_slices = valid_mig_slices
self.supports_mig = valid_mig_slices is not None
def __str__(self):
return "SystemClass(gpu={}, aliases={}, pci_ids={}, arch={}, counts={})".format(
self.gpu,
self.aliases,
self.pci_ids,
self.arch,
self.supported_counts)
def get_match(self, name, count, pci_id=None, mig_conf=None):
"""
Attempts to match a certain GPU configuration with this SystemClass. If the configuration does not match,
returns None. Otherwise, returns a System object with metadata about the configuration.
mig_conf should be a MIGConfiguration object.
"""
# PCI ID has precedence over name, as pre-release chips often are not named yet in nvidia-smi
gpu_match = False
if pci_id is not None and len(self.pci_ids) > 0:
gpu_match = pci_id in self.pci_ids
# Attempt to match a name if PCI ID is not specified, or if the system has no known PCI IDs
# This is an else block, but we explicitly show condition for clarity
elif pci_id is None or len(self.pci_ids) == 0:
gpu_match = name in self.aliases
if not gpu_match:
return None
# If GPU matches, match the count and mig configs (if applicable)
if count not in self.supported_counts:
return None
if self.supports_mig and mig_conf is not None and not mig_conf.check_compatible(self.valid_mig_slices):
return None
return System(self.gpu, self.arch, count, pci_id=pci_id, mig_conf=mig_conf)
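# Hedged usage sketch (added for illustration; not part of the original file):
#   >>> KnownSystems.A100_SXM4_40GB.get_match("A100-SXM4-40GB", 1).get_id()
#   'A100-SXM4-40GBx1'
# while an unsupported count such as 3 makes get_match return None.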
class KnownSystems(object):
"""
Global List of supported systems
"""
A100_PCIe_40GB = SystemClass("A100-PCIe", ["A100-PCIE-40GB"], ["20F1", "20BF"], Architecture.Ampere, [1, 2, 8])
A100_PCIe_80GB = SystemClass("A100-PCIe-80GB", ["A100-PCIE-80GB"], ["PLACEHOLDER. REPLACE ME WITH ACTUAL PCI ID"], Architecture.Ampere, [1, 2, 8]) # Placeholder
A100_SXM4_40GB = SystemClass("A100-SXM4-40GB", ["A100-SXM4-40GB"], ["20B0"], Architecture.Ampere, [1, 8],
valid_mig_slices=[MIGSlice(1, 5), MIGSlice(2, 10), MIGSlice(3, 20)])
A100_SXM_80GB = SystemClass("A100-SXM-80GB", ["A100-SXM-80GB"], ["20B2"], Architecture.Ampere, [1, 4, 8],
valid_mig_slices=[MIGSlice(1, 10), MIGSlice(2, 20), MIGSlice(3, 40)])
GeForceRTX_3080 = SystemClass("GeForceRTX3080", ["GeForce RTX 3080"], ["2206"], Architecture.Ampere, [1])
GeForceRTX_3090 = SystemClass("GeForceRTX3090", ["GeForce RTX 3090", "Quadro RTX A6000", "RTX A6000"],
["2204", "2230"], Architecture.Ampere, [1])
A10 = SystemClass("A10", ["A10"], ["2236"], Architecture.Ampere, [1, 8])
T4 = SystemClass("T4", ["Tesla T4", "T4 32GB"], ["1EB8", "1EB9"], Architecture.Turing, [1, 8, 20])
TitanRTX = SystemClass("TitanRTX", ["TITAN RTX", "Quadro RTX 8000", "Quadro RTX 6000"], ["1E02", "1E30", "1E36"],
Architecture.Turing, [1, 4])
AGX_Xavier = SystemClass("AGX_Xavier", ["Jetson-AGX"], [], Architecture.Xavier, [1])
Xavier_NX = SystemClass("Xavier_NX", ["Xavier NX"], [], Architecture.Xavier, [1])
A30 = SystemClass("A30", ["A30"], ["20B7"], Architecture.Ampere, [1, 8],
valid_mig_slices=[MIGSlice(1, 6), MIGSlice(2, 12), MIGSlice(4, 24)])
# CPU Systems
Triton_CPU_2S_6258R = SystemClass("Triton_CPU_2S_6258R", ["2S_6258R"], [], Architecture.Intel_CPU_x86_64, [1])
Triton_CPU_4S_8380H = SystemClass("Triton_CPU_4S_8380H", ["4S_8380H"], [], Architecture.Intel_CPU_x86_64, [1])
@staticmethod
def get_all_system_classes():
return [
getattr(KnownSystems, attr)
for attr in dir(KnownSystems)
if type(getattr(KnownSystems, attr)) == SystemClass
]
@staticmethod
def get_all_systems():
all_classes = KnownSystems.get_all_system_classes()
all_systems = []
for system_class in all_classes:
for count in system_class.supported_counts:
all_systems.append(System(system_class.gpu, system_class.arch, count))
if count == 1 and system_class.valid_mig_slices is not None:
for mig_slice in system_class.valid_mig_slices:
conf = {"DummyGPU": {mig_slice: 1}}
mig_conf = MIGConfiguration(conf)
all_systems.append(System(system_class.gpu, system_class.arch, count, mig_conf=mig_conf))
# Special handling for 56 MIG DGX-A100
all_systems.append(
System("A100-SXM-80GB", Architecture.Ampere, 8,
mig_conf=MIGConfiguration({"DummyGPU": {MIGSlice(1, 10): 56}}))
)
return all_systems
| 39.034483 | 165 | 0.612927 |
7943ffaee95bdd805e31f28e36ee6508a2e9d1cf | 46,325 | py | Python | mypy/server/update.py | TV4Fun/mypy | fe6fd1a10cd133fd6833ff64a68a27d0641820dc | ["PSF-2.0"] | null | null | null | mypy/server/update.py | TV4Fun/mypy | fe6fd1a10cd133fd6833ff64a68a27d0641820dc | ["PSF-2.0"] | null | null | null | mypy/server/update.py | TV4Fun/mypy | fe6fd1a10cd133fd6833ff64a68a27d0641820dc | ["PSF-2.0"] | null | null | null |
"""Update build by processing changes using fine-grained dependencies.
Use fine-grained dependencies to update targets in other modules that
may be affected by externally-visible changes in the changed modules.
This forms the core of the fine-grained incremental daemon mode. This
module is not used at all by the 'classic' (non-daemon) incremental
mode.
Here is some motivation for this mode:
* By keeping program state in memory between incremental runs, we
only have to process changed modules, not their dependencies. The
classic incremental mode has to deserialize the symbol tables of
all dependencies of changed modules, which can be slow for large
programs.
* Fine-grained dependencies allow processing only the relevant parts
of modules indirectly affected by a change. Say, if only one function
in a large module is affected by a change in another module, only this
function is processed. The classic incremental mode always processes
an entire file as a unit, which is typically much slower.
* It's possible to independently process individual modules within an
import cycle (SCC). Small incremental changes can be fast independent
of the size of the related SCC. In classic incremental mode, any change
within a SCC requires the entire SCC to be processed, which can slow
things down considerably.
Some terms:
* A *target* is a function/method definition or the top level of a module.
We refer to targets using their fully qualified name (e.g.
'mod.Cls.method'). Targets are the smallest units of processing during
fine-grained incremental checking.
* A *trigger* represents the properties of a part of a program, and it
gets triggered/fired when these properties change. For example,
'<mod.func>' refers to a module-level function. It gets triggered if
the signature of the function changes, or if the function is removed,
for example.
Some program state is maintained across multiple build increments in
memory:
* The full ASTs of all modules are stored in memory all the time (this
includes the type map).
* A fine-grained dependency map is maintained, which maps triggers to
affected program locations (these can be targets, triggers, or
classes). The latter determine what other parts of a program need to
be processed again due to a fired trigger.
Here's a summary of how a fine-grained incremental program update happens:
* Determine which modules have changes in their source code since the
previous update.
* Process changed modules one at a time. Perform a separate full update
for each changed module, but only report the errors after all modules
have been processed, since the intermediate states can generate bogus
errors due to only seeing a partial set of changes.
* Each changed module is processed in full. We parse the module, and
run semantic analysis to create a new AST and symbol table for the
module. Reuse the existing ASTs and symbol tables of modules that
have no changes in their source code. At the end of this stage, we have
two ASTs and symbol tables for the changed module (the old and the new
versions). The latter AST has not yet been type checked.
* Take a snapshot of the old symbol table. This is used later to determine
which properties of the module have changed and which triggers to fire.
* Merge the old AST with the new AST, preserving the identities of
externally visible AST nodes for which we can find a corresponding node
in the new AST. (Look at mypy.server.astmerge for the details.) This
way all external references to AST nodes in the changed module will
continue to point to the right nodes (assuming they still have a valid
target).
* Type check the new module.
* Take another snapshot of the symbol table of the changed module.
Look at the differences between the old and new snapshots to determine
which parts of the changed modules have changed. The result is a set of
fired triggers.
* Using the dependency map and the fired triggers, decide which other
targets have become stale and need to be reprocessed.
* Create new fine-grained dependencies for the changed module. We don't
garbage collect old dependencies, since extra dependencies are relatively
harmless (they take some memory and can theoretically slow things down
a bit by causing redundant work). This is implemented in
mypy.server.deps.
* Strip the stale AST nodes that we found above. This returns them to a
state resembling the end of semantic analysis pass 1. We'll run semantic
analysis again on the existing AST nodes, and since semantic analysis
is not idempotent, we need to revert some changes made during semantic
analysis. This is implemented in mypy.server.aststrip.
* Run semantic analyzer passes 2 and 3 on the stale AST nodes, and type
check them. We also need to do the symbol table snapshot comparison
dance to find any changes, and we need to merge ASTs to preserve AST node
identities.
* If some triggers have been fired, continue processing and repeat the
previous steps until no triggers are fired.
This module is tested using end-to-end fine-grained incremental mode
test cases (test-data/unit/fine-grained*.test).
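
An illustrative sketch (added here for clarity; not part of mypy itself): the
fine-grained dependency map used below is a plain Dict[str, Set[str]] from
trigger names to affected locations, e.g.

    deps = {'<mod.f>': {'mod2.g', 'mod3.Cls.method'}}

so when the signature of mod.f changes, the trigger '<mod.f>' fires and the
targets mod2.g and mod3.Cls.method are queued for reprocessing.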
"""
import time
from typing import (
Dict, List, Set, Tuple, Iterable, Union, Optional, NamedTuple, Callable,
Sequence
)
from mypy.build import (
BuildManager, State, BuildSource, BuildResult, Graph, load_graph,
process_fresh_modules, DEBUG_FINE_GRAINED,
)
from mypy.checker import FineGrainedDeferredNode
from mypy.errors import CompileError
from mypy.nodes import (
MypyFile, FuncDef, TypeInfo, SymbolNode, Decorator,
OverloadedFuncDef, SymbolTable, LambdaExpr
)
from mypy.options import Options
from mypy.fscache import FileSystemCache
from mypy.semanal import apply_semantic_analyzer_patches
from mypy.server.astdiff import (
snapshot_symbol_table, compare_symbol_table_snapshots, SnapshotItem
)
from mypy.server.astmerge import merge_asts
from mypy.server.aststrip import strip_target
from mypy.server.deps import get_dependencies_of_target
from mypy.server.target import module_prefix, split_target
from mypy.server.trigger import make_trigger, WILDCARD_TAG
from mypy.typestate import TypeState
MYPY = False
if MYPY:
from typing_extensions import Final
MAX_ITER = 1000 # type: Final
class FineGrainedBuildManager:
def __init__(self, result: BuildResult) -> None:
"""Initialize fine-grained build based on a batch build.
Args:
result: Result from the initialized build.
The manager and graph will be taken over by this class.
manager: State of the build (mutated by this class)
graph: Additional state of the build (mutated by this class)
"""
manager = result.manager
self.manager = manager
self.graph = result.graph
self.previous_modules = get_module_to_path_map(self.graph)
self.deps = get_all_dependencies(manager, self.graph)
self.previous_targets_with_errors = manager.errors.targets()
self.previous_messages = result.errors[:]
# Module, if any, that had blocking errors in the last run as (id, path) tuple.
self.blocking_error = None # type: Optional[Tuple[str, str]]
# Module that we haven't processed yet but that are known to be stale.
self.stale = [] # type: List[Tuple[str, str]]
# Disable the cache so that load_graph doesn't try going back to disk
# for the cache.
self.manager.cache_enabled = False
# Some hints to the test suite about what is going on:
# Active triggers during the last update
self.triggered = [] # type: List[str]
# Modules passed to update during the last update
self.changed_modules = [] # type: List[Tuple[str, str]]
# Modules processed during the last update
self.updated_modules = [] # type: List[str]
def update(self,
changed_modules: List[Tuple[str, str]],
removed_modules: List[Tuple[str, str]]) -> List[str]:
"""Update previous build result by processing changed modules.
Also propagate changes to other modules as needed, but only process
those parts of other modules that are affected by the changes. Retain
the existing ASTs and symbol tables of unaffected modules.
Reuses original BuildManager and Graph.
Args:
changed_modules: Modules changed since the previous update/build; each is
a (module id, path) tuple. Includes modified and added modules.
Assume this is correct; it's not validated here.
removed_modules: Modules that have been deleted since the previous update
or removed from the build.
Returns:
A list of errors.
"""
changed_modules = changed_modules + removed_modules
removed_set = {module for module, _ in removed_modules}
self.changed_modules = changed_modules
if not changed_modules:
return self.previous_messages
# Reset find_module's caches for the new build.
self.manager.find_module_cache.clear()
self.triggered = []
self.updated_modules = []
changed_modules = dedupe_modules(changed_modules + self.stale)
initial_set = {id for id, _ in changed_modules}
self.manager.log_fine_grained('==== update %s ====' % ', '.join(
repr(id) for id, _ in changed_modules))
if self.previous_targets_with_errors and is_verbose(self.manager):
self.manager.log_fine_grained('previous targets with errors: %s' %
sorted(self.previous_targets_with_errors))
if self.blocking_error:
# Handle blocking errors first. We'll exit as soon as we find a
# module that still has blocking errors.
self.manager.log_fine_grained('existing blocker: %s' % self.blocking_error[0])
changed_modules = dedupe_modules([self.blocking_error] + changed_modules)
self.blocking_error = None
while True:
result = self.update_one(changed_modules, initial_set, removed_set)
changed_modules, (next_id, next_path), blocker_messages = result
if blocker_messages is not None:
self.blocking_error = (next_id, next_path)
self.stale = changed_modules
messages = blocker_messages
break
# It looks like we are done processing everything, so now
# reprocess all targets with errors. We are careful to
# support the possibility that reprocessing an errored module
# might trigger loading of a module, but I am not sure
# if this can really happen.
if not changed_modules:
# N.B: We just checked next_id, so manager.errors contains
# the errors from it. Thus we consider next_id up to date
# when propagating changes from the errored targets,
# which prevents us from reprocessing errors in it.
changed_modules = propagate_changes_using_dependencies(
self.manager, self.graph, self.deps, set(), {next_id},
self.previous_targets_with_errors)
changed_modules = dedupe_modules(changed_modules)
if not changed_modules:
# Preserve state needed for the next update.
self.previous_targets_with_errors = self.manager.errors.targets()
messages = self.manager.errors.new_messages()
break
self.previous_messages = messages[:]
return messages
def update_one(self,
changed_modules: List[Tuple[str, str]],
initial_set: Set[str],
removed_set: Set[str]) -> Tuple[List[Tuple[str, str]],
Tuple[str, str],
Optional[List[str]]]:
"""Process a module from the list of changed modules.
Returns:
Tuple with these items:
- Updated list of pending changed modules as (module id, path) tuples
- Module which was actually processed as (id, path) tuple
- If there was a blocking error, the error messages from it
"""
t0 = time.time()
next_id, next_path = changed_modules.pop(0)
if next_id not in self.previous_modules and next_id not in initial_set:
self.manager.log_fine_grained('skip %r (module not in import graph)' % next_id)
return changed_modules, (next_id, next_path), None
result = self.update_module(next_id, next_path, next_id in removed_set)
remaining, (next_id, next_path), blocker_messages = result
changed_modules = [(id, path) for id, path in changed_modules
if id != next_id]
changed_modules = dedupe_modules(remaining + changed_modules)
t1 = time.time()
self.manager.log_fine_grained(
"update once: {} in {:.3f}s - {} left".format(
next_id, t1 - t0, len(changed_modules)))
return changed_modules, (next_id, next_path), blocker_messages
def update_module(self,
module: str,
path: str,
force_removed: bool) -> Tuple[List[Tuple[str, str]],
Tuple[str, str],
Optional[List[str]]]:
"""Update a single modified module.
If the module contains imports of previously unseen modules, only process one of
the new modules and return the remaining work to be done.
Args:
module: Id of the module
path: File system path of the module
force_removed: If True, consider module removed from the build even if path
exists (used for removing an existing file from the build)
Returns:
Tuple with these items:
- Remaining modules to process as (module id, path) tuples
- Module which was actually processed as (id, path) tuple
- If there was a blocking error, the error messages from it
"""
self.manager.log_fine_grained('--- update single %r ---' % module)
self.updated_modules.append(module)
manager = self.manager
previous_modules = self.previous_modules
graph = self.graph
# If this is an already existing module, make sure that we have
# its tree loaded so that we can snapshot it for comparison.
ensure_trees_loaded(manager, graph, [module])
# Record symbol table snapshot of old version the changed module.
old_snapshots = {} # type: Dict[str, Dict[str, SnapshotItem]]
if module in manager.modules:
snapshot = snapshot_symbol_table(module, manager.modules[module].names)
old_snapshots[module] = snapshot
manager.errors.reset()
result = update_module_isolated(module, path, manager, previous_modules, graph,
force_removed)
if isinstance(result, BlockedUpdate):
# Blocking error -- just give up
module, path, remaining, errors = result
self.previous_modules = get_module_to_path_map(graph)
return remaining, (module, path), errors
assert isinstance(result, NormalUpdate) # Work around #4124
module, path, remaining, tree = result
# TODO: What to do with stale dependencies?
triggered = calculate_active_triggers(manager, old_snapshots, {module: tree})
if is_verbose(self.manager):
filtered = [trigger for trigger in triggered
if not trigger.endswith('__>')]
self.manager.log_fine_grained('triggered: %r' % sorted(filtered))
self.triggered.extend(triggered | self.previous_targets_with_errors)
collect_dependencies([module], self.deps, graph)
remaining += propagate_changes_using_dependencies(
manager, graph, self.deps, triggered,
{module},
targets_with_errors=set())
# Preserve state needed for the next update.
self.previous_targets_with_errors.update(manager.errors.targets())
self.previous_modules = get_module_to_path_map(graph)
return remaining, (module, path), None
def find_unloaded_deps(manager: BuildManager, graph: Dict[str, State],
initial: Sequence[str]) -> List[str]:
"""Find all the deps of the nodes in initial that haven't had their tree loaded.
The key invariant here is that if a module is loaded, so are all
    of its dependencies. This means that when we encounter a loaded
module, we don't need to explore its dependencies. (This
invariant is slightly violated when dependencies are added, which
can be handled by calling find_unloaded_deps directly on the new
dependencies.)
"""
worklist = list(initial)
seen = set() # type: Set[str]
unloaded = []
while worklist:
node = worklist.pop()
if node in seen or node not in graph:
continue
seen.add(node)
if node not in manager.modules:
ancestors = graph[node].ancestors or []
worklist.extend(graph[node].dependencies + ancestors)
unloaded.append(node)
return unloaded
def ensure_trees_loaded(manager: BuildManager, graph: Dict[str, State],
initial: Sequence[str]) -> None:
"""Ensure that the modules in initial and their deps have loaded trees."""
to_process = find_unloaded_deps(manager, graph, initial)
if to_process:
if is_verbose(manager):
manager.log_fine_grained("Calling process_fresh_modules on set of size {} ({})".format(
len(to_process), sorted(to_process)))
process_fresh_modules(graph, to_process, manager)
def get_all_dependencies(manager: BuildManager, graph: Dict[str, State]) -> Dict[str, Set[str]]:
"""Return the fine-grained dependency map for an entire build."""
# Deps for each module were computed during build() or loaded from the cache.
deps = {} # type: Dict[str, Set[str]]
collect_dependencies(graph, deps, graph)
TypeState.add_all_protocol_deps(deps)
return deps
# The result of update_module_isolated when no blockers, with these items:
#
# - Id of the changed module (can be different from the module argument)
# - Path of the changed module
# - New AST for the changed module (None if module was deleted)
# - Remaining changed modules that are not processed yet as (module id, path)
# tuples (non-empty if the original changed module imported other new
# modules)
NormalUpdate = NamedTuple('NormalUpdate', [('module', str),
('path', str),
('remaining', List[Tuple[str, str]]),
('tree', Optional[MypyFile])])
# The result of update_module_isolated when there is a blocking error. Items
# are similar to NormalUpdate (but there are fewer).
BlockedUpdate = NamedTuple('BlockedUpdate', [('module', str),
('path', str),
('remaining', List[Tuple[str, str]]),
('messages', List[str])])
UpdateResult = Union[NormalUpdate, BlockedUpdate]
def update_module_isolated(module: str,
path: str,
manager: BuildManager,
previous_modules: Dict[str, str],
graph: Graph,
force_removed: bool) -> UpdateResult:
"""Build a new version of one changed module only.
Don't propagate changes to elsewhere in the program. Raise CompileError on
encountering a blocking error.
Args:
module: Changed module (modified, created or deleted)
path: Path of the changed module
manager: Build manager
graph: Build graph
            force_removed: If True, consider the module removed from the build even if the
file exists
Returns a named tuple describing the result (see above for details).
"""
if module not in graph:
manager.log_fine_grained('new module %r' % module)
if not manager.fscache.isfile(path) or force_removed:
delete_module(module, path, graph, manager)
return NormalUpdate(module, path, [], None)
sources = get_sources(manager.fscache, previous_modules, [(module, path)])
if module in manager.missing_modules:
manager.missing_modules.remove(module)
orig_module = module
orig_state = graph.get(module)
orig_tree = manager.modules.get(module)
def restore(ids: List[str]) -> None:
# For each of the modules in ids, restore that id's old
# manager.modules and graphs entries. (Except for the original
# module, this means deleting them.)
for id in ids:
if id == orig_module and orig_tree:
manager.modules[id] = orig_tree
elif id in manager.modules:
del manager.modules[id]
if id == orig_module and orig_state:
graph[id] = orig_state
elif id in graph:
del graph[id]
new_modules = [] # type: List[State]
try:
if module in graph:
del graph[module]
load_graph(sources, manager, graph, new_modules)
except CompileError as err:
# Parse error somewhere in the program -- a blocker
assert err.module_with_blocker
restore([module] + [st.id for st in new_modules])
return BlockedUpdate(err.module_with_blocker, path, [], err.messages)
# Reparsing the file may have brought in dependencies that we
# didn't have before. Make sure that they are loaded to restore
# the invariant that a module having a loaded tree implies that
# its dependencies do as well.
ensure_trees_loaded(manager, graph, graph[module].dependencies)
# Find any other modules brought in by imports.
changed_modules = [(st.id, st.xpath) for st in new_modules]
# If there are multiple modules to process, only process one of them and return
# the remaining ones to the caller.
if len(changed_modules) > 1:
# As an optimization, look for a module that imports no other changed modules.
module, path = find_relative_leaf_module(changed_modules, graph)
changed_modules.remove((module, path))
remaining_modules = changed_modules
# The remaining modules haven't been processed yet so drop them.
restore([id for id, _ in remaining_modules])
manager.log_fine_grained('--> %r (newly imported)' % module)
else:
remaining_modules = []
state = graph[module]
# Process the changed file.
state.parse_file()
assert state.tree is not None, "file must be at least parsed"
# TODO: state.fix_suppressed_dependencies()?
if module == 'typing':
# We need to manually add typing aliases to builtins, like we
# do in process_stale_scc. Because this can't be done until
# builtins is also loaded, there isn't an obvious way to
# refactor this.
manager.semantic_analyzer.add_builtin_aliases(state.tree)
try:
state.semantic_analysis()
except CompileError as err:
# There was a blocking error, so module AST is incomplete. Restore old modules.
restore([module])
return BlockedUpdate(module, path, remaining_modules, err.messages)
state.semantic_analysis_pass_three()
state.semantic_analysis_apply_patches()
# Merge old and new ASTs.
new_modules_dict = {module: state.tree} # type: Dict[str, Optional[MypyFile]]
replace_modules_with_new_variants(manager, graph, {orig_module: orig_tree}, new_modules_dict)
# Perform type checking.
state.type_checker().reset()
state.type_check_first_pass()
state.type_check_second_pass()
state.compute_fine_grained_deps()
state.finish_passes()
graph[module] = state
return NormalUpdate(module, path, remaining_modules, state.tree)
def find_relative_leaf_module(modules: List[Tuple[str, str]], graph: Graph) -> Tuple[str, str]:
"""Find a module in a list that directly imports no other module in the list.
If no such module exists, return the lexicographically first module from the list.
Always return one of the items in the modules list.
NOTE: If both 'abc' and 'typing' have changed, an effect of the above rule is that
we prefer 'abc', even if both are in the same SCC. This works around a false
positive in 'typing', at least in tests.
Args:
modules: List of (module, path) tuples (non-empty)
graph: Program import graph that contains all modules in the module list
"""
assert modules
# Sort for repeatable results.
modules = sorted(modules)
module_set = {module for module, _ in modules}
for module, path in modules:
state = graph[module]
if len(set(state.dependencies) & module_set) == 0:
# Found it!
return module, path
# Could not find any. Just return the first module (by lexicographic order).
return modules[0]
def delete_module(module_id: str,
path: str,
graph: Graph,
manager: BuildManager) -> None:
manager.log_fine_grained('delete module %r' % module_id)
# TODO: Remove deps for the module (this only affects memory use, not correctness)
if module_id in graph:
del graph[module_id]
if module_id in manager.modules:
del manager.modules[module_id]
components = module_id.split('.')
if len(components) > 1:
# Delete reference to module in parent module.
parent_id = '.'.join(components[:-1])
# If parent module is ignored, it won't be included in the modules dictionary.
if parent_id in manager.modules:
parent = manager.modules[parent_id]
if components[-1] in parent.names:
del parent.names[components[-1]]
# If the module is removed from the build but still exists, then
    # we mark it as missing so that it will still get picked up by `from ... import` handling.
if manager.fscache.isfile(path):
manager.missing_modules.add(module_id)
def dedupe_modules(modules: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
seen = set() # type: Set[str]
result = []
for id, path in modules:
if id not in seen:
seen.add(id)
result.append((id, path))
return result
def get_module_to_path_map(graph: Graph) -> Dict[str, str]:
return {module: node.xpath
for module, node in graph.items()}
def get_sources(fscache: FileSystemCache,
modules: Dict[str, str],
changed_modules: List[Tuple[str, str]]) -> List[BuildSource]:
sources = []
for id, path in changed_modules:
if fscache.isfile(path):
sources.append(BuildSource(path, id, None))
return sources
def collect_dependencies(new_modules: Iterable[str],
deps: Dict[str, Set[str]],
graph: Dict[str, State]) -> None:
for id in new_modules:
if id not in graph:
continue
for trigger, targets in graph[id].fine_grained_deps.items():
deps.setdefault(trigger, set()).update(targets)
# Merge also the newly added protocol deps.
TypeState.update_protocol_deps(deps)
def calculate_active_triggers(manager: BuildManager,
old_snapshots: Dict[str, Dict[str, SnapshotItem]],
new_modules: Dict[str, Optional[MypyFile]]) -> Set[str]:
"""Determine activated triggers by comparing old and new symbol tables.
For example, if only the signature of function m.f is different in the new
symbol table, return {'<m.f>'}.
"""
names = set() # type: Set[str]
for id in new_modules:
snapshot1 = old_snapshots.get(id)
if snapshot1 is None:
names.add(id)
snapshot1 = {}
new = new_modules[id]
if new is None:
snapshot2 = snapshot_symbol_table(id, SymbolTable())
names.add(id)
else:
snapshot2 = snapshot_symbol_table(id, new.names)
diff = compare_symbol_table_snapshots(id, snapshot1, snapshot2)
package_nesting_level = id.count('.')
for item in diff.copy():
if (item.count('.') <= package_nesting_level + 1
and item.split('.')[-1] not in ('__builtins__',
'__file__',
'__name__',
'__package__',
'__doc__')):
# Activate catch-all wildcard trigger for top-level module changes (used for
# "from m import *"). This also gets triggered by changes to module-private
# entries, but as these unneeded dependencies only result in extra processing,
# it's a minor problem.
#
# TODO: Some __* names cause mistriggers. Fix the underlying issue instead of
# special casing them here.
diff.add(id + WILDCARD_TAG)
if item.count('.') > package_nesting_level + 1:
# These are for changes within classes, used by protocols.
diff.add(item.rsplit('.', 1)[0] + WILDCARD_TAG)
names |= diff
return {make_trigger(name) for name in names}
def replace_modules_with_new_variants(
manager: BuildManager,
graph: Dict[str, State],
old_modules: Dict[str, Optional[MypyFile]],
        new_modules: Dict[str, Optional[MypyFile]]) -> None:
    """Replace modules with newly built versions.
Retain the identities of externally visible AST nodes in the
old ASTs so that references to the affected modules from other
modules will still be valid (unless something was deleted or
replaced with an incompatible definition, in which case there
will be dangling references that will be handled by
propagate_changes_using_dependencies).
"""
for id in new_modules:
preserved_module = old_modules.get(id)
new_module = new_modules[id]
if preserved_module and new_module is not None:
merge_asts(preserved_module, preserved_module.names,
new_module, new_module.names)
manager.modules[id] = preserved_module
graph[id].tree = preserved_module
def propagate_changes_using_dependencies(
manager: BuildManager,
graph: Dict[str, State],
deps: Dict[str, Set[str]],
triggered: Set[str],
up_to_date_modules: Set[str],
targets_with_errors: Set[str]) -> List[Tuple[str, str]]:
"""Transitively rechecks targets based on triggers and the dependency map.
    Returns a list of (module id, path) tuples representing modules that contain
a target that needs to be reprocessed but that has not been parsed yet."""
num_iter = 0
remaining_modules = [] # type: List[Tuple[str, str]]
# Propagate changes until nothing visible has changed during the last
# iteration.
while triggered or targets_with_errors:
num_iter += 1
if num_iter > MAX_ITER:
raise RuntimeError('Max number of iterations (%d) reached (endless loop?)' % MAX_ITER)
todo, unloaded, stale_protos = find_targets_recursive(manager, graph,
triggered, deps, up_to_date_modules)
# TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
remaining_modules.extend((id, graph[id].xpath) for id in sorted(unloaded))
# Also process targets that used to have errors, as otherwise some
# errors might be lost.
for target in targets_with_errors:
id = module_prefix(graph, target)
if id is not None and id not in up_to_date_modules:
if id not in todo:
todo[id] = set()
manager.log_fine_grained('process target with error: %s' % target)
more_nodes, _ = lookup_target(manager, target)
todo[id].update(more_nodes)
triggered = set()
# First invalidate subtype caches in all stale protocols.
# We need to do this to avoid false negatives if the protocol itself is
# unchanged, but was marked stale because its sub- (or super-) type changed.
for info in stale_protos:
TypeState.reset_subtype_caches_for(info)
# Then fully reprocess all targets.
# TODO: Preserve order (set is not optimal)
for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
assert id not in up_to_date_modules
triggered |= reprocess_nodes(manager, graph, id, nodes, deps)
# Changes elsewhere may require us to reprocess modules that were
# previously considered up to date. For example, there may be a
# dependency loop that loops back to an originally processed module.
up_to_date_modules = set()
targets_with_errors = set()
if is_verbose(manager):
manager.log_fine_grained('triggered: %r' % list(triggered))
return remaining_modules
def find_targets_recursive(
manager: BuildManager,
graph: Graph,
triggers: Set[str],
deps: Dict[str, Set[str]],
up_to_date_modules: Set[str]) -> Tuple[Dict[str, Set[FineGrainedDeferredNode]],
Set[str], Set[TypeInfo]]:
"""Find names of all targets that need to reprocessed, given some triggers.
Returns: A tuple containing a:
* Dictionary from module id to a set of stale targets.
* A set of module ids for unparsed modules with stale targets.
"""
result = {} # type: Dict[str, Set[FineGrainedDeferredNode]]
worklist = triggers
processed = set() # type: Set[str]
stale_protos = set() # type: Set[TypeInfo]
unloaded_files = set() # type: Set[str]
# Find AST nodes corresponding to each target.
#
# TODO: Don't rely on a set, since the items are in an unpredictable order.
while worklist:
processed |= worklist
current = worklist
worklist = set()
for target in current:
if target.startswith('<'):
worklist |= deps.get(target, set()) - processed
else:
module_id = module_prefix(graph, target)
if module_id is None:
# Deleted module.
continue
if module_id in up_to_date_modules:
# Already processed.
continue
if (module_id not in manager.modules
or manager.modules[module_id].is_cache_skeleton):
# We haven't actually parsed and checked the module, so we don't have
# access to the actual nodes.
# Add it to the queue of files that need to be processed fully.
unloaded_files.add(module_id)
continue
if module_id not in result:
result[module_id] = set()
manager.log_fine_grained('process: %s' % target)
deferred, stale_proto = lookup_target(manager, target)
if stale_proto:
stale_protos.add(stale_proto)
result[module_id].update(deferred)
return result, unloaded_files, stale_protos
def reprocess_nodes(manager: BuildManager,
graph: Dict[str, State],
module_id: str,
nodeset: Set[FineGrainedDeferredNode],
deps: Dict[str, Set[str]]) -> Set[str]:
"""Reprocess a set of nodes within a single module.
Return fired triggers.
"""
if module_id not in graph:
manager.log_fine_grained('%s not in graph (blocking errors or deleted?)' %
module_id)
return set()
file_node = manager.modules[module_id]
old_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
old_symbols = {name: names.copy() for name, names in old_symbols.items()}
old_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
def key(node: FineGrainedDeferredNode) -> int:
# Unlike modules which are sorted by name within SCC,
# nodes within the same module are sorted by line number, because
# this is how they are processed in normal mode.
return node.node.line
nodes = sorted(nodeset, key=key)
options = graph[module_id].options
manager.errors.set_file_ignored_lines(
file_node.path, file_node.ignored_lines, options.ignore_errors)
targets = set()
for node in nodes:
target = target_from_node(module_id, node.node)
if target is not None:
targets.add(target)
manager.errors.clear_errors_in_targets(file_node.path, targets)
# Strip semantic analysis information.
for deferred in nodes:
strip_target(deferred.node)
semantic_analyzer = manager.semantic_analyzer
patches = [] # type: List[Tuple[int, Callable[[], None]]]
# Second pass of semantic analysis. We don't redo the first pass, because it only
# does local things that won't go stale.
for deferred in nodes:
with semantic_analyzer.file_context(
file_node=file_node,
fnam=file_node.path,
options=options,
active_type=deferred.active_typeinfo):
manager.semantic_analyzer.refresh_partial(deferred.node, patches)
# Third pass of semantic analysis.
for deferred in nodes:
with semantic_analyzer.file_context(
file_node=file_node,
fnam=file_node.path,
options=options,
active_type=deferred.active_typeinfo,
scope=manager.semantic_analyzer_pass3.scope):
manager.semantic_analyzer_pass3.refresh_partial(deferred.node, patches)
with semantic_analyzer.file_context(
file_node=file_node,
fnam=file_node.path,
options=options,
active_type=None):
apply_semantic_analyzer_patches(patches)
# Merge symbol tables to preserve identities of AST nodes. The file node will remain
# the same, but other nodes may have been recreated with different identities, such as
# NamedTuples defined using assignment statements.
new_symbols = find_symbol_tables_recursive(file_node.fullname(), file_node.names)
for name in old_symbols:
if name in new_symbols:
merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])
# Type check.
checker = graph[module_id].type_checker()
checker.reset()
# We seem to need additional passes in fine-grained incremental mode.
checker.pass_num = 0
checker.last_pass = 3
more = checker.check_second_pass(nodes)
while more:
more = False
if graph[module_id].type_checker().check_second_pass():
more = True
new_symbols_snapshot = snapshot_symbol_table(file_node.fullname(), file_node.names)
# Check if any attribute types were changed and need to be propagated further.
changed = compare_symbol_table_snapshots(file_node.fullname(),
old_symbols_snapshot,
new_symbols_snapshot)
new_triggered = {make_trigger(name) for name in changed}
# Dependencies may have changed.
update_deps(module_id, nodes, graph, deps, options)
# Report missing imports.
graph[module_id].verify_dependencies()
return new_triggered
def find_symbol_tables_recursive(prefix: str, symbols: SymbolTable) -> Dict[str, SymbolTable]:
"""Find all nested symbol tables.
Args:
prefix: Full name prefix (used for return value keys and to filter result so that
cross references to other modules aren't included)
symbols: Root symbol table
Returns a dictionary from full name to corresponding symbol table.
"""
result = {}
result[prefix] = symbols
for name, node in symbols.items():
if isinstance(node.node, TypeInfo) and node.node.fullname().startswith(prefix + '.'):
more = find_symbol_tables_recursive(prefix + '.' + name, node.node.names)
result.update(more)
return result
def update_deps(module_id: str,
nodes: List[FineGrainedDeferredNode],
graph: Dict[str, State],
deps: Dict[str, Set[str]],
options: Options) -> None:
for deferred in nodes:
node = deferred.node
type_map = graph[module_id].type_map()
tree = graph[module_id].tree
assert tree is not None, "Tree must be processed at this stage"
new_deps = get_dependencies_of_target(module_id, tree, node, type_map,
options.python_version)
for trigger, targets in new_deps.items():
deps.setdefault(trigger, set()).update(targets)
# Merge also the newly added protocol deps (if any).
TypeState.update_protocol_deps(deps)
def lookup_target(manager: BuildManager,
target: str) -> Tuple[List[FineGrainedDeferredNode], Optional[TypeInfo]]:
"""Look up a target by fully-qualified name.
The first item in the return tuple is a list of deferred nodes that
needs to be reprocessed. If the target represents a TypeInfo corresponding
to a protocol, return it as a second item in the return tuple, otherwise None.
"""
def not_found() -> None:
manager.log_fine_grained(
"Can't find matching target for %s (stale dependency?)" % target)
modules = manager.modules
items = split_target(modules, target)
if items is None:
not_found() # Stale dependency
return [], None
module, rest = items
if rest:
components = rest.split('.')
else:
components = []
node = modules[module] # type: Optional[SymbolNode]
file = None # type: Optional[MypyFile]
active_class = None
active_class_name = None
for c in components:
if isinstance(node, TypeInfo):
active_class = node
active_class_name = node.name()
if isinstance(node, MypyFile):
file = node
if (not isinstance(node, (MypyFile, TypeInfo))
or c not in node.names):
not_found() # Stale dependency
return [], None
node = node.names[c].node
if isinstance(node, TypeInfo):
# A ClassDef target covers the body of the class and everything defined
# within it. To get the body we include the entire surrounding target,
# typically a module top-level, since we don't support processing class
        # bodies as separate entities for simplicity.
assert file is not None
if node.fullname() != target:
# This is a reference to a different TypeInfo, likely due to a stale dependency.
# Processing them would spell trouble -- for example, we could be refreshing
# a deserialized TypeInfo with missing attributes.
not_found()
return [], None
result = [FineGrainedDeferredNode(file, None, None)]
stale_info = None # type: Optional[TypeInfo]
if node.is_protocol:
stale_info = node
for name, symnode in node.names.items():
node = symnode.node
if isinstance(node, FuncDef):
method, _ = lookup_target(manager, target + '.' + name)
result.extend(method)
return result, stale_info
if isinstance(node, Decorator):
# Decorator targets actually refer to the function definition only.
node = node.func
if not isinstance(node, (FuncDef,
MypyFile,
OverloadedFuncDef)):
# The target can't be refreshed. It's possible that the target was
# changed to another type and we have a stale dependency pointing to it.
not_found()
return [], None
if node.fullname() != target:
# Stale reference points to something unexpected. We shouldn't process since the
# context will be wrong and it could be a partially initialized deserialized node.
not_found()
return [], None
return [FineGrainedDeferredNode(node, active_class_name, active_class)], None
def is_verbose(manager: BuildManager) -> bool:
return manager.options.verbosity >= 1 or DEBUG_FINE_GRAINED
def target_from_node(module: str,
node: Union[FuncDef, MypyFile, OverloadedFuncDef]
) -> Optional[str]:
"""Return the target name corresponding to a deferred node.
Args:
module: Must be module id of the module that defines 'node'
Returns the target name, or None if the node is not a valid target in the given
module (for example, if it's actually defined in another module).
"""
if isinstance(node, MypyFile):
if module != node.fullname():
# Actually a reference to another module -- likely a stale dependency.
return None
return module
else: # OverloadedFuncDef or FuncDef
if node.info:
return '%s.%s' % (node.info.fullname(), node.name())
else:
return '%s.%s' % (module, node.name())
| 42.853839 | 99 | 0.645764 |
7943ffc8ab0856d150d1fb00f818988d33b17352 | 1,031 | py | Python | voice100/train_ttsalign.py | kaiidams/voice100-tts | 75cbf185ef7ef80a38db382ffe61a1e100474f6c | [
"MIT"
] | 6 | 2021-11-05T17:50:44.000Z | 2022-03-07T23:13:15.000Z | voice100/train_ttsalign.py | kaiidams/voice100-tts | 75cbf185ef7ef80a38db382ffe61a1e100474f6c | [
"MIT"
] | 1 | 2022-01-18T13:26:22.000Z | 2022-03-18T11:23:02.000Z | voice100/train_ttsalign.py | kaiidams/voice100-tts | 75cbf185ef7ef80a38db382ffe61a1e100474f6c | [
"MIT"
] | 1 | 2022-01-03T05:20:09.000Z | 2022-01-03T05:20:09.000Z | # Copyright (C) 2021 Katsuya Iida. All rights reserved.
from argparse import ArgumentParser
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from .datasets import AlignTextDataModule
from .models.tts import TextToAlignTextModel
def cli_main():
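    # Standard PyTorch Lightning entry point: build the data module and model from the
    # CLI arguments, checkpoint on validation loss, and run training.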
pl.seed_everything(1234)
parser = ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = AlignTextDataModule.add_argparse_args(parser)
parser = TextToAlignTextModel.add_model_specific_args(parser)
parser.set_defaults(log_every_n_steps=10)
args = parser.parse_args()
data: AlignTextDataModule = AlignTextDataModule.from_argparse_args(args)
model = TextToAlignTextModel.from_argparse_args(args, vocab_size=data.vocab_size)
checkpoint_callback = ModelCheckpoint(monitor='val_loss', save_last=True, every_n_epochs=10)
trainer = pl.Trainer.from_argparse_args(
args,
callbacks=[checkpoint_callback])
trainer.fit(model, data)
if __name__ == '__main__':
cli_main()
| 33.258065 | 96 | 0.777886 |
7943ffc8c90a633fdfe0c8b3dd369055b0746c72 | 3,983 | py | Python | modeling/smd/evaluate.py | HLTCHKUST/ke-dialogue | cb73237889860adedcfd381b28813feb267cef81 | [
"MIT"
] | 41 | 2020-10-05T05:57:08.000Z | 2021-12-20T12:07:42.000Z | modeling/smd/evaluate.py | HLTCHKUST/ke-dialogue | cb73237889860adedcfd381b28813feb267cef81 | [
"MIT"
] | 5 | 2021-03-15T09:34:39.000Z | 2022-02-10T16:02:23.000Z | modeling/smd/evaluate.py | HLTCHKUST/ke-dialogue | cb73237889860adedcfd381b28813feb267cef81 | [
"MIT"
] | 4 | 2020-10-19T08:10:43.000Z | 2021-11-09T13:06:24.000Z | import os
import os.path
import sys
sys.path.append('../..')
from utils.preprocessSMD import load_SMD
from transformers import (AdamW,WEIGHTS_NAME, CONFIG_NAME)
from utils.hugging_face import load_model,get_parser,top_filtering, SPECIAL_TOKENS, add_special_tokens_, average_distributed_scalar, make_logdir, build_input_from_segments,add_token_bAbI
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from ignite.engine import Engine, Events
from ignite.handlers import ModelCheckpoint
from ignite.metrics import Accuracy, Loss, MetricsLambda, RunningAverage
from ignite.contrib.handlers import ProgressBar, PiecewiseLinear
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler, OptimizerParamsHandler
import math
from pprint import pformat
import random
from utils.eval_metrics import moses_multi_bleu, compute_prf, compute_prf_SMD
import numpy as np
from tqdm import tqdm
import warnings
import json
import jsonlines
from collections import defaultdict
def sample_sequence(history, graph,tokenizer, model, args, current_output=None):
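    # Autoregressive decoding: each step rebuilds the model input from the dialogue
    # history (plus the knowledge graph, when enabled), applies temperature scaling and
    # top-k / nucleus (top-p) filtering, then samples (or greedily picks, if
    # args.no_sample) the next token until a special token or args.max_length is hit.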
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
padding = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[-1])
if current_output is None:
current_output = []
if(args.flatten_KB):
history += graph['edges']
for i in range(args.max_length):
instance = build_input_from_segments(args,history,current_output,graph,tokenizer, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
nodes_ids = None
if (args.graph or args.edge_list) and len(instance["input_graph_ids"])>0:
max_c = max(len(col) for col in instance["input_graph_ids"])
temp = []
for clmn in instance["input_graph_ids"]:
temp.append(clmn + [padding] * (max_c - len(clmn)))
nodes_ids = torch.tensor([temp], device=args.device)
att_mask = None
logits = model(input_ids, token_type_ids=token_type_ids, nodes=nodes_ids, attention_mask=att_mask)
if isinstance(logits, tuple): # for gpt2 and maybe others
logits = logits[0]
logits = logits[0, -1, :] / args.temperature
logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
probs = F.softmax(logits, dim=-1)
prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
if i < args.min_length and prev.item() in special_tokens_ids:
while prev.item() in special_tokens_ids:
if probs.max().item() == 1:
warnings.warn("Warning: model generating special token with probability 1.")
break # avoid infinitely looping over special token
prev = torch.multinomial(probs, num_samples=1)
if prev.item() in special_tokens_ids:
break
current_output.append(prev.item())
return current_output
if __name__ == "__main__":
args = get_parser()
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# Get model and tokenizer
model, tokenizer = load_model(args,load=True)
print("Load Data")
test, _ = load_SMD(args, tokenizer, test_flag=True)
j_output = defaultdict(list)
for i, conv in tqdm(enumerate(test),total=len(test)):
for sample in conv['dialogue']:
out_ids = sample_sequence(sample['history'],sample["graph"] if args.dataset == "DIALKG" else conv,tokenizer, model, args)
out_text = tokenizer.decode(out_ids, skip_special_tokens=True)
j_output[conv["id"]].append({"spk":sample['spk'],"text":out_text})
with open(args.model_checkpoint+'/result.json', 'w') as fp:
json.dump(j_output, fp, indent=4)
| 41.061856 | 186 | 0.700979 |
79440198714278fd7eb88e05b352e35fe01e7fd0 | 1,058 | py | Python | cdc/util/json.py | pastly/craps-dice-control | f6f69c9220dffd5f7e2ef07c929b15b4a73bdd13 | [
"FSFAP"
] | null | null | null | cdc/util/json.py | pastly/craps-dice-control | f6f69c9220dffd5f7e2ef07c929b15b4a73bdd13 | [
"FSFAP"
] | 8 | 2019-03-14T18:46:34.000Z | 2019-05-09T02:11:17.000Z | cdc/util/json.py | pastly/craps-dice-control | f6f69c9220dffd5f7e2ef07c929b15b4a73bdd13 | [
"FSFAP"
] | null | null | null | import json
class NumericKeyDecoder(json.JSONDecoder):
''' Python allows ints to be keys in a dict but JSON doesn't. It's nice to
be able to use ints as keys in dicts, so parse numeric-looking keys in the
given JSON into their numeric types. '''
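    # For example, json.loads('{"1": "a", "2.5": "b"}', cls=NumericKeyDecoder)
    # yields {1: 'a', 2.5: 'b'} rather than keeping the keys as strings.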
def decode(self, s):
res = super().decode(s)
return self._decode(res)
def _decode(self, o):
if isinstance(o, dict):
d = {}
for k, v in o.items():
try:
int(k)
except ValueError:
pass
else:
d[int(k)] = self._decode(v)
continue
try:
float(k)
except ValueError:
pass
else:
d[float(k)] = self._decode(v)
continue
d[k] = self._decode(v)
return d
elif isinstance(o, list):
return [self._decode(l) for l in o]
else:
return o
| 29.388889 | 78 | 0.44896 |
7944022c75b51c0a22a5ed82eb29c63240b69f29 | 77 | py | Python | twilio/__init__.py | soyandreav/twilio-python | 9accbc8b0932fdf3d024dc4c5429aa26469d8ac2 | [
"MIT"
] | null | null | null | twilio/__init__.py | soyandreav/twilio-python | 9accbc8b0932fdf3d024dc4c5429aa26469d8ac2 | [
"MIT"
] | null | null | null | twilio/__init__.py | soyandreav/twilio-python | 9accbc8b0932fdf3d024dc4c5429aa26469d8ac2 | [
"MIT"
] | null | null | null |
__version_info__ = ('7', '3', '1')
__version__ = '.'.join(__version_info__)
| 19.25 | 40 | 0.649351 |
79440257968033a2792c9a48342936d27a288a39 | 247,091 | py | Python | blender_bindings/source1/bsp/entities/left4dead2_entity_classes.py | anderlli0053/SourceIO | 3c0c4839939ce698439987ac52154f89ee2f5341 | [
"MIT"
] | 199 | 2019-04-02T02:30:58.000Z | 2022-03-30T21:29:49.000Z | source1/bsp/entities/left4dead2_entity_classes.py | syborg64/SourceIO | e4ba86d801f518e192260af08ef533759c2e1cc3 | [
"MIT"
] | 113 | 2019-03-03T19:36:25.000Z | 2022-03-31T19:44:05.000Z | source1/bsp/entities/left4dead2_entity_classes.py | syborg64/SourceIO | e4ba86d801f518e192260af08ef533759c2e1cc3 | [
"MIT"
] | 38 | 2019-05-15T16:49:30.000Z | 2022-03-22T03:40:43.000Z |
def parse_source_value(value):
if type(value) is str:
value: str
if value.replace('.', '', 1).replace('-', '', 1).isdecimal():
return float(value) if '.' in value else int(value)
return 0
else:
return value
def parse_int_vector(string):
    # Collapse double spaces before splitting so stray padding does not produce empty tokens.
    return [parse_source_value(val) for val in string.replace('  ', ' ').split(' ')]
def parse_float_vector(string):
    return [float(val) for val in string.replace('  ', ' ').split(' ')]
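# These helpers coerce raw entity key-values (which are plain strings in the BSP
# entity lump) into numbers and vectors; the generated classes below expose each
# FGD property through an accessor that falls back to a per-property default value.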
class Base:
hammer_id_counter = 0
def __init__(self, entity_data: dict):
self._hammer_id = -1
self._raw_data = entity_data
@classmethod
def new_hammer_id(cls):
new_id = cls.hammer_id_counter
cls.hammer_id_counter += 1
return new_id
@property
def class_name(self):
return self._raw_data.get('classname')
@property
def hammer_id(self):
if self._hammer_id == -1:
if 'hammerid' in self._raw_data:
self._hammer_id = int(self._raw_data.get('hammerid'))
else: # Titanfall
self._hammer_id = Base.new_hammer_id()
return self._hammer_id
class Angles(Base):
@property
def angles(self):
return parse_float_vector(self._raw_data.get('angles', "0 0 0"))
class Origin(Base):
@property
def origin(self):
return parse_float_vector(self._raw_data.get('origin', None))
class Studiomodel(Base):
@property
def model(self):
return self._raw_data.get('model', None)
@property
def skin(self):
return parse_source_value(self._raw_data.get('skin', 0))
@property
def body(self):
return parse_source_value(self._raw_data.get('body', 0))
@property
def disableshadows(self):
return self._raw_data.get('disableshadows', "0")
class BasePlat(Base):
pass
class Targetname(Base):
@property
def targetname(self):
return self._raw_data.get('targetname', None)
@property
def vscripts(self):
return self._raw_data.get('vscripts', "")
@property
def thinkfunction(self):
return self._raw_data.get('thinkfunction', "")
class Parentname(Base):
@property
def parentname(self):
return self._raw_data.get('parentname', None)
class BaseBrush(Base):
pass
class EnableDisable(Base):
@property
def StartDisabled(self):
return self._raw_data.get('startdisabled', "0")
class RenderFxChoices(Base):
@property
def renderfx(self):
return self._raw_data.get('renderfx', "0")
class Shadow(Base):
@property
def disableshadows(self):
return self._raw_data.get('disableshadows', "0")
class Glow(Base):
@property
def glowstate(self):
return self._raw_data.get('glowstate', "0")
@property
def glowrange(self):
return parse_source_value(self._raw_data.get('glowrange', 0))
@property
def glowrangemin(self):
return parse_source_value(self._raw_data.get('glowrangemin', 0))
@property
def glowcolor(self):
return parse_int_vector(self._raw_data.get('glowcolor', "0 0 0"))
class SystemLevelChoice(Base):
@property
def mincpulevel(self):
return self._raw_data.get('mincpulevel', "0")
@property
def maxcpulevel(self):
return self._raw_data.get('maxcpulevel', "0")
@property
def mingpulevel(self):
return self._raw_data.get('mingpulevel', "0")
@property
def maxgpulevel(self):
return self._raw_data.get('maxgpulevel', "0")
@property
def disableX360(self):
return self._raw_data.get('disablex360', "0")
class RenderFields(RenderFxChoices, SystemLevelChoice):
@property
def rendermode(self):
return self._raw_data.get('rendermode', "0")
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 255))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def disablereceiveshadows(self):
return self._raw_data.get('disablereceiveshadows', "0")
@property
def fademindist(self):
return parse_source_value(self._raw_data.get('fademindist', -1))
@property
def fademaxdist(self):
return parse_source_value(self._raw_data.get('fademaxdist', 0))
@property
def fadescale(self):
return parse_source_value(self._raw_data.get('fadescale', 1))
class Inputfilter(Base):
@property
def InputFilter(self):
return self._raw_data.get('inputfilter', "0")
class Global(Base):
@property
def globalname(self):
return self._raw_data.get('globalname', "")
class EnvGlobal(Targetname):
@property
def initialstate(self):
return self._raw_data.get('initialstate', "0")
@property
def counter(self):
return parse_source_value(self._raw_data.get('counter', 0))
class DamageFilter(Base):
@property
def damagefilter(self):
return self._raw_data.get('damagefilter', "")
class ResponseContext(Base):
@property
def ResponseContext(self):
return self._raw_data.get('responsecontext', "")
class Breakable(DamageFilter, Shadow, Targetname):
@property
def ExplodeDamage(self):
return parse_source_value(self._raw_data.get('explodedamage', 0))
@property
def ExplodeRadius(self):
return parse_source_value(self._raw_data.get('exploderadius', 0))
@property
def PerformanceMode(self):
return self._raw_data.get('performancemode', "0")
class BreakableBrush(Breakable, Parentname, Global):
@property
def propdata(self):
return self._raw_data.get('propdata', "0")
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 1))
@property
def material(self):
return self._raw_data.get('material', "0")
@property
def explosion(self):
return self._raw_data.get('explosion', "0")
@property
def gibdir(self):
return parse_float_vector(self._raw_data.get('gibdir', "0 0 0"))
@property
def nodamageforces(self):
return self._raw_data.get('nodamageforces', "0")
@property
def gibmodel(self):
return self._raw_data.get('gibmodel', "")
@property
def spawnobject(self):
return self._raw_data.get('spawnobject', "0")
@property
def explodemagnitude(self):
return parse_source_value(self._raw_data.get('explodemagnitude', 0))
@property
def pressuredelay(self):
return parse_source_value(self._raw_data.get('pressuredelay', 0))
class BreakableProp(Breakable):
@property
def pressuredelay(self):
return parse_source_value(self._raw_data.get('pressuredelay', 0))
class BaseNPC(Shadow, DamageFilter, Angles, RenderFields, ResponseContext, Targetname):
@property
def target(self):
return self._raw_data.get('target', None)
@property
def squadname(self):
return self._raw_data.get('squadname', None)
@property
def hintgroup(self):
return self._raw_data.get('hintgroup', "")
@property
def hintlimiting(self):
return self._raw_data.get('hintlimiting', "0")
@property
def sleepstate(self):
return self._raw_data.get('sleepstate', "0")
@property
def wakeradius(self):
return parse_source_value(self._raw_data.get('wakeradius', 0))
@property
def wakesquad(self):
return self._raw_data.get('wakesquad', "0")
@property
def enemyfilter(self):
return self._raw_data.get('enemyfilter', "")
@property
def ignoreunseenenemies(self):
return self._raw_data.get('ignoreunseenenemies', "0")
@property
def physdamagescale(self):
return parse_source_value(self._raw_data.get('physdamagescale', 1.0))
class info_npc_spawn_destination(Parentname, Angles, Targetname):
icon_sprite = "editor/info_target.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def ReuseDelay(self):
return parse_source_value(self._raw_data.get('reusedelay', 1))
@property
def RenameNPC(self):
return self._raw_data.get('renamenpc', "")
class BaseNPCMaker(Angles, EnableDisable, Targetname):
icon_sprite = "editor/npc_maker.vmt"
@property
def MaxNPCCount(self):
return parse_source_value(self._raw_data.get('maxnpccount', 1))
@property
def SpawnFrequency(self):
return self._raw_data.get('spawnfrequency', "5")
@property
def MaxLiveChildren(self):
return parse_source_value(self._raw_data.get('maxlivechildren', 5))
class npc_template_maker(BaseNPCMaker):
icon_sprite = "editor/npc_maker.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def TemplateName(self):
return self._raw_data.get('templatename', "")
@property
def Radius(self):
return parse_source_value(self._raw_data.get('radius', 256))
@property
def DestinationGroup(self):
return self._raw_data.get('destinationgroup', None)
@property
def CriterionVisibility(self):
return self._raw_data.get('criterionvisibility', "2")
@property
def CriterionDistance(self):
return self._raw_data.get('criteriondistance', "2")
@property
def MinSpawnDistance(self):
return parse_source_value(self._raw_data.get('minspawndistance', 0))
class BaseHelicopter(BaseNPC):
@property
def InitialSpeed(self):
return self._raw_data.get('initialspeed', "0")
class PlayerClass(Base):
pass
class Light(Base):
@property
def _light(self):
return parse_int_vector(self._raw_data.get('_light', "255 255 255 200"))
@property
def _lightHDR(self):
return parse_int_vector(self._raw_data.get('_lighthdr', "-1 -1 -1 1"))
@property
def _lightscaleHDR(self):
return parse_source_value(self._raw_data.get('_lightscalehdr', 0.5))
@property
def style(self):
return self._raw_data.get('style', "0")
@property
def pattern(self):
return self._raw_data.get('pattern', "")
@property
def _constant_attn(self):
return self._raw_data.get('_constant_attn', "0")
@property
def _linear_attn(self):
return self._raw_data.get('_linear_attn', "0")
@property
def _quadratic_attn(self):
return self._raw_data.get('_quadratic_attn', "1")
@property
def _fifty_percent_distance(self):
return self._raw_data.get('_fifty_percent_distance', "0")
@property
def _zero_percent_distance(self):
return self._raw_data.get('_zero_percent_distance', "0")
@property
def _hardfalloff(self):
return parse_source_value(self._raw_data.get('_hardfalloff', 0))
@property
def _castentityshadow(self):
return self._raw_data.get('_castentityshadow', "1")
@property
def _shadoworiginoffset(self):
return parse_float_vector(self._raw_data.get('_shadoworiginoffset', "0 0 0"))
class Node(Base):
@property
def nodeid(self):
return parse_source_value(self._raw_data.get('nodeid', None))
class HintNode(Node):
@property
def hinttype(self):
return self._raw_data.get('hinttype', "0")
@property
def hintactivity(self):
return self._raw_data.get('hintactivity', "")
@property
def nodeFOV(self):
return self._raw_data.get('nodefov', "180")
@property
def StartHintDisabled(self):
return self._raw_data.get('starthintdisabled', "0")
@property
def Group(self):
return self._raw_data.get('group', "")
@property
def TargetNode(self):
return parse_source_value(self._raw_data.get('targetnode', -1))
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 0))
@property
def IgnoreFacing(self):
return self._raw_data.get('ignorefacing', "2")
@property
def MinimumState(self):
return self._raw_data.get('minimumstate', "1")
@property
def MaximumState(self):
return self._raw_data.get('maximumstate', "3")
class TriggerOnce(Parentname, Global, EnableDisable, Origin, Targetname):
@property
def filtername(self):
return self._raw_data.get('filtername', None)
class Trigger(TriggerOnce):
pass
class worldbase(Base):
@property
def message(self):
return self._raw_data.get('message', None)
@property
def skyname(self):
return self._raw_data.get('skyname', "sky_l4d_rural02_hdr")
@property
def chaptertitle(self):
return self._raw_data.get('chaptertitle', "")
@property
def startdark(self):
return self._raw_data.get('startdark', "0")
@property
def gametitle(self):
return self._raw_data.get('gametitle', "0")
@property
def newunit(self):
return self._raw_data.get('newunit', "0")
@property
def maxoccludeearea(self):
return parse_source_value(self._raw_data.get('maxoccludeearea', 0))
@property
def minoccluderarea(self):
return parse_source_value(self._raw_data.get('minoccluderarea', 0))
@property
def maxoccludeearea_x360(self):
return parse_source_value(self._raw_data.get('maxoccludeearea_x360', 0))
@property
def minoccluderarea_x360(self):
return parse_source_value(self._raw_data.get('minoccluderarea_x360', 0))
@property
def maxpropscreenwidth(self):
return parse_source_value(self._raw_data.get('maxpropscreenwidth', -1))
@property
def minpropscreenwidth(self):
return parse_source_value(self._raw_data.get('minpropscreenwidth', 0))
@property
def detailvbsp(self):
return self._raw_data.get('detailvbsp', "detail.vbsp")
@property
def detailmaterial(self):
return self._raw_data.get('detailmaterial', "detail/detailsprites")
@property
def coldworld(self):
return self._raw_data.get('coldworld', "0")
@property
def timeofday(self):
return self._raw_data.get('timeofday', "0")
@property
def startmusictype(self):
return self._raw_data.get('startmusictype', "0")
@property
def musicpostfix(self):
return self._raw_data.get('musicpostfix', "Waterfront")
class worldspawn(ResponseContext, worldbase, Targetname):
pass
class ambient_generic(Targetname):
icon_sprite = "editor/ambient_generic.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def message(self):
return self._raw_data.get('message', "")
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 10))
@property
def preset(self):
return self._raw_data.get('preset', "0")
@property
def volstart(self):
return parse_source_value(self._raw_data.get('volstart', 0))
@property
def fadeinsecs(self):
return parse_source_value(self._raw_data.get('fadeinsecs', 0))
@property
def fadeoutsecs(self):
return parse_source_value(self._raw_data.get('fadeoutsecs', 0))
@property
def pitch(self):
return parse_source_value(self._raw_data.get('pitch', 100))
@property
def pitchstart(self):
return parse_source_value(self._raw_data.get('pitchstart', 100))
@property
def spinup(self):
return parse_source_value(self._raw_data.get('spinup', 0))
@property
def spindown(self):
return parse_source_value(self._raw_data.get('spindown', 0))
@property
def lfotype(self):
return parse_source_value(self._raw_data.get('lfotype', 0))
@property
def lforate(self):
return parse_source_value(self._raw_data.get('lforate', 0))
@property
def lfomodpitch(self):
return parse_source_value(self._raw_data.get('lfomodpitch', 0))
@property
def lfomodvol(self):
return parse_source_value(self._raw_data.get('lfomodvol', 0))
@property
def cspinup(self):
return parse_source_value(self._raw_data.get('cspinup', 0))
@property
def radius(self):
return self._raw_data.get('radius', "1250")
@property
def SourceEntityName(self):
return self._raw_data.get('sourceentityname', None)
class ambient_music(Targetname):
icon_sprite = "editor/ambient_generic.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def message(self):
return self._raw_data.get('message', "")
class sound_mix_layer(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MixLayerName(self):
return self._raw_data.get('mixlayername', "")
@property
def Level(self):
return parse_source_value(self._raw_data.get('level', 0.0))
class func_lod(Targetname):
@property
def DisappearMinDist(self):
return parse_source_value(self._raw_data.get('disappearmindist', 2000))
@property
def DisappearMaxDist(self):
return parse_source_value(self._raw_data.get('disappearmaxdist', 2200))
@property
def Solid(self):
return self._raw_data.get('solid', "0")
class env_zoom(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Rate(self):
return parse_source_value(self._raw_data.get('rate', 1.0))
@property
def FOV(self):
return parse_source_value(self._raw_data.get('fov', 75))
class env_screenoverlay(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def OverlayName1(self):
return self._raw_data.get('overlayname1', "")
@property
def OverlayTime1(self):
return parse_source_value(self._raw_data.get('overlaytime1', 1.0))
@property
def OverlayName2(self):
return self._raw_data.get('overlayname2', "")
@property
def OverlayTime2(self):
return parse_source_value(self._raw_data.get('overlaytime2', 1.0))
@property
def OverlayName3(self):
return self._raw_data.get('overlayname3', "")
@property
def OverlayTime3(self):
return parse_source_value(self._raw_data.get('overlaytime3', 1.0))
@property
def OverlayName4(self):
return self._raw_data.get('overlayname4', "")
@property
def OverlayTime4(self):
return parse_source_value(self._raw_data.get('overlaytime4', 1.0))
@property
def OverlayName5(self):
return self._raw_data.get('overlayname5', "")
@property
def OverlayTime5(self):
return parse_source_value(self._raw_data.get('overlaytime5', 1.0))
@property
def OverlayName6(self):
return self._raw_data.get('overlayname6', "")
@property
def OverlayTime6(self):
return parse_source_value(self._raw_data.get('overlaytime6', 1.0))
@property
def OverlayName7(self):
return self._raw_data.get('overlayname7', "")
@property
def OverlayTime7(self):
return parse_source_value(self._raw_data.get('overlaytime7', 1.0))
@property
def OverlayName8(self):
return self._raw_data.get('overlayname8', "")
@property
def OverlayTime8(self):
return parse_source_value(self._raw_data.get('overlaytime8', 1.0))
@property
def OverlayName9(self):
return self._raw_data.get('overlayname9', "")
@property
def OverlayTime9(self):
return parse_source_value(self._raw_data.get('overlaytime9', 1.0))
@property
def OverlayName10(self):
return self._raw_data.get('overlayname10', "")
@property
def OverlayTime10(self):
return parse_source_value(self._raw_data.get('overlaytime10', 1.0))
class env_screeneffect(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def type(self):
return self._raw_data.get('type', "0")
class env_texturetoggle(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
class env_splash(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def scale(self):
return parse_source_value(self._raw_data.get('scale', 8.0))
class env_particlelight(Parentname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Color(self):
return parse_int_vector(self._raw_data.get('color', "255 0 0"))
@property
def Intensity(self):
return parse_source_value(self._raw_data.get('intensity', 5000))
@property
def directional(self):
return self._raw_data.get('directional', "0")
@property
def PSName(self):
return self._raw_data.get('psname', "")
class env_sun(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def use_angles(self):
return self._raw_data.get('use_angles', "0")
@property
def pitch(self):
return parse_source_value(self._raw_data.get('pitch', 0))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "100 80 80"))
@property
def overlaycolor(self):
return parse_int_vector(self._raw_data.get('overlaycolor', "0 0 0"))
@property
def size(self):
return parse_source_value(self._raw_data.get('size', 16))
@property
def overlaysize(self):
return parse_source_value(self._raw_data.get('overlaysize', -1))
@property
def material(self):
return self._raw_data.get('material', "sprites/light_glow02_add_noz")
@property
def overlaymaterial(self):
return self._raw_data.get('overlaymaterial', "sprites/light_glow02_add_noz")
@property
def HDRColorScale(self):
return parse_source_value(self._raw_data.get('hdrcolorscale', 0.5))
class game_ragdoll_manager(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MaxRagdollCount(self):
return parse_source_value(self._raw_data.get('maxragdollcount', -1))
@property
def MaxRagdollCountDX8(self):
return parse_source_value(self._raw_data.get('maxragdollcountdx8', -1))
@property
def SaveImportant(self):
return self._raw_data.get('saveimportant', "0")
class game_gib_manager(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def maxpieces(self):
return parse_source_value(self._raw_data.get('maxpieces', -1))
@property
def maxpiecesdx8(self):
return parse_source_value(self._raw_data.get('maxpiecesdx8', -1))
@property
def allownewgibs(self):
return self._raw_data.get('allownewgibs', "0")
class env_dof_controller(Targetname):
icon_sprite = "editor/env_dof_controller.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_lightglow(Parentname, Angles, Targetname):
model_ = "models/editor/axis_helper_thick.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def VerticalGlowSize(self):
return parse_source_value(self._raw_data.get('verticalglowsize', 30))
@property
def HorizontalGlowSize(self):
return parse_source_value(self._raw_data.get('horizontalglowsize', 30))
@property
def MinDist(self):
return parse_source_value(self._raw_data.get('mindist', 500))
@property
def MaxDist(self):
return parse_source_value(self._raw_data.get('maxdist', 2000))
@property
def OuterMaxDist(self):
return parse_source_value(self._raw_data.get('outermaxdist', 0))
@property
def GlowProxySize(self):
return parse_source_value(self._raw_data.get('glowproxysize', 2.0))
@property
def HDRColorScale(self):
return parse_source_value(self._raw_data.get('hdrcolorscale', 0.5))
class env_smokestack(Parentname, Angles):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def targetname(self):
return self._raw_data.get('targetname', None)
@property
def InitialState(self):
return self._raw_data.get('initialstate', "0")
@property
def BaseSpread(self):
return parse_source_value(self._raw_data.get('basespread', 20))
@property
def SpreadSpeed(self):
return parse_source_value(self._raw_data.get('spreadspeed', 15))
@property
def Speed(self):
return parse_source_value(self._raw_data.get('speed', 30))
@property
def StartSize(self):
return parse_source_value(self._raw_data.get('startsize', 20))
@property
def EndSize(self):
return parse_source_value(self._raw_data.get('endsize', 30))
@property
def Rate(self):
return parse_source_value(self._raw_data.get('rate', 20))
@property
def JetLength(self):
return parse_source_value(self._raw_data.get('jetlength', 180))
@property
def WindAngle(self):
return parse_source_value(self._raw_data.get('windangle', 0))
@property
def WindSpeed(self):
return parse_source_value(self._raw_data.get('windspeed', 0))
@property
def SmokeMaterial(self):
return self._raw_data.get('smokematerial', "particle/SmokeStack.vmt")
@property
def twist(self):
return parse_source_value(self._raw_data.get('twist', 0))
@property
def roll(self):
return parse_source_value(self._raw_data.get('roll', 0))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 255))
class env_fade(Targetname):
icon_sprite = "editor/env_fade"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def duration(self):
return self._raw_data.get('duration', "2")
@property
def holdtime(self):
return self._raw_data.get('holdtime', "0")
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 255))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "0 0 0"))
class env_player_surface_trigger(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def gamematerial(self):
return self._raw_data.get('gamematerial', "0")
class trigger_tonemap(Targetname):
@property
def TonemapName(self):
return self._raw_data.get('tonemapname', None)
class env_tonemap_controller(Targetname):
icon_sprite = "editor/env_tonemap_controller.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_tonemap_controller_infected(env_tonemap_controller):
icon_sprite = "editor/env_tonemap_controller.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_tonemap_controller_ghost(env_tonemap_controller):
icon_sprite = "editor/env_tonemap_controller.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class func_useableladder(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def point0(self):
return parse_float_vector(self._raw_data.get('point0', None))
@property
def point1(self):
return parse_float_vector(self._raw_data.get('point1', None))
@property
def StartDisabled(self):
return self._raw_data.get('startdisabled', "0")
@property
def ladderSurfaceProperties(self):
return self._raw_data.get('laddersurfaceproperties', None)
class func_ladderendpoint(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
class info_ladder_dismount(Parentname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
class func_areaportalwindow(Targetname):
@property
def target(self):
return self._raw_data.get('target', None)
@property
def FadeStartDist(self):
return parse_source_value(self._raw_data.get('fadestartdist', 128))
@property
def FadeDist(self):
return parse_source_value(self._raw_data.get('fadedist', 512))
@property
def TranslucencyLimit(self):
return self._raw_data.get('translucencylimit', "0")
@property
def BackgroundBModel(self):
return self._raw_data.get('backgroundbmodel', "")
@property
def PortalVersion(self):
return parse_source_value(self._raw_data.get('portalversion', 1))
class func_wall(RenderFields, Shadow, Global, Targetname):
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class func_clip_vphysics(EnableDisable, Targetname):
@property
def filtername(self):
return self._raw_data.get('filtername', None)
class func_brush(Parentname, Inputfilter, Shadow, Global, EnableDisable, Origin, RenderFields, Targetname):
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
@property
def Solidity(self):
return self._raw_data.get('solidity', "1")
@property
def excludednpc(self):
return self._raw_data.get('excludednpc', "")
@property
def invert_exclusion(self):
return self._raw_data.get('invert_exclusion', "0")
@property
def solidbsp(self):
return self._raw_data.get('solidbsp', "0")
@property
def vrad_brush_cast_shadows(self):
return self._raw_data.get('vrad_brush_cast_shadows', "0")
class vgui_screen_base(Parentname, Angles, Targetname):
@property
def panelname(self):
return self._raw_data.get('panelname', None)
@property
def overlaymaterial(self):
return self._raw_data.get('overlaymaterial', "")
@property
def width(self):
return parse_source_value(self._raw_data.get('width', 32))
@property
def height(self):
return parse_source_value(self._raw_data.get('height', 32))
class vgui_screen(vgui_screen_base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class vgui_slideshow_display(Parentname, Angles, Targetname):
model_ = "models/editor/axis_helper_thick.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def displaytext(self):
return self._raw_data.get('displaytext', "")
@property
def directory(self):
return self._raw_data.get('directory', "slideshow")
@property
def minslidetime(self):
return parse_source_value(self._raw_data.get('minslidetime', 0.5))
@property
def maxslidetime(self):
return parse_source_value(self._raw_data.get('maxslidetime', 0.5))
@property
def cycletype(self):
return self._raw_data.get('cycletype', "0")
@property
def nolistrepeat(self):
return self._raw_data.get('nolistrepeat', "0")
@property
def width(self):
return parse_source_value(self._raw_data.get('width', 256))
@property
def height(self):
return parse_source_value(self._raw_data.get('height', 128))
class cycler(Parentname, Angles, RenderFields, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', None)
@property
def skin(self):
return parse_source_value(self._raw_data.get('skin', 0))
@property
def sequence(self):
return parse_source_value(self._raw_data.get('sequence', 0))
class func_orator(Parentname, Studiomodel, Angles, RenderFields, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def maxThenAnyDispatchDist(self):
return parse_source_value(self._raw_data.get('maxthenanydispatchdist', 0))
class gibshooterbase(Parentname, Targetname):
@property
def angles(self):
return self._raw_data.get('angles', "0 0 0")
@property
def m_iGibs(self):
return parse_source_value(self._raw_data.get('m_igibs', 3))
@property
def delay(self):
return self._raw_data.get('delay', "0")
@property
def gibangles(self):
return self._raw_data.get('gibangles', "0 0 0")
@property
def gibanglevelocity(self):
return self._raw_data.get('gibanglevelocity', "0")
@property
def m_flVelocity(self):
return parse_source_value(self._raw_data.get('m_flvelocity', 200))
@property
def m_flVariance(self):
return self._raw_data.get('m_flvariance', "0.15")
@property
def m_flGibLife(self):
return self._raw_data.get('m_flgiblife', "4")
@property
def lightingorigin(self):
return self._raw_data.get('lightingorigin', "")
class env_beam(Parentname, RenderFxChoices, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 100))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def Radius(self):
return parse_source_value(self._raw_data.get('radius', 256))
@property
def life(self):
return self._raw_data.get('life', "1")
@property
def BoltWidth(self):
return parse_source_value(self._raw_data.get('boltwidth', 2))
@property
def NoiseAmplitude(self):
return parse_source_value(self._raw_data.get('noiseamplitude', 0))
@property
def texture(self):
return self._raw_data.get('texture', "sprites/laserbeam.spr")
@property
def TextureScroll(self):
return parse_source_value(self._raw_data.get('texturescroll', 35))
@property
def framerate(self):
return parse_source_value(self._raw_data.get('framerate', 0))
@property
def framestart(self):
return parse_source_value(self._raw_data.get('framestart', 0))
@property
def StrikeTime(self):
return self._raw_data.get('striketime', "1")
@property
def damage(self):
return self._raw_data.get('damage', "0")
@property
def LightningStart(self):
return self._raw_data.get('lightningstart', "")
@property
def LightningEnd(self):
return self._raw_data.get('lightningend', "")
@property
def decalname(self):
return self._raw_data.get('decalname', "Bigshot")
@property
def HDRColorScale(self):
return parse_source_value(self._raw_data.get('hdrcolorscale', 1.0))
@property
def TouchType(self):
return self._raw_data.get('touchtype', "0")
@property
def filtername(self):
return self._raw_data.get('filtername', None)
class env_beverage(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 10))
@property
def beveragetype(self):
return self._raw_data.get('beveragetype', "0")
class env_embers(Parentname, Angles, Targetname):
@property
def particletype(self):
return self._raw_data.get('particletype', "0")
@property
def density(self):
return parse_source_value(self._raw_data.get('density', 50))
@property
def lifetime(self):
return parse_source_value(self._raw_data.get('lifetime', 4))
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 32))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
class env_funnel(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_blood(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def spraydir(self):
return parse_float_vector(self._raw_data.get('spraydir', "0 0 0"))
@property
def color(self):
return self._raw_data.get('color', "0")
@property
def amount(self):
return self._raw_data.get('amount', "100")
class env_bubbles(Parentname, Targetname):
@property
def density(self):
return parse_source_value(self._raw_data.get('density', 2))
@property
def frequency(self):
return parse_source_value(self._raw_data.get('frequency', 2))
@property
def current(self):
return parse_source_value(self._raw_data.get('current', 0))
class env_explosion(Parentname, Targetname):
icon_sprite = "editor/env_explosion.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def iMagnitude(self):
return parse_source_value(self._raw_data.get('imagnitude', 100))
@property
def iRadiusOverride(self):
return parse_source_value(self._raw_data.get('iradiusoverride', 0))
@property
def fireballsprite(self):
return self._raw_data.get('fireballsprite', "sprites/zerogxplode.spr")
@property
def rendermode(self):
return self._raw_data.get('rendermode', "5")
@property
def ignoredEntity(self):
return self._raw_data.get('ignoredentity', None)
@property
def ignoredClass(self):
return parse_source_value(self._raw_data.get('ignoredclass', 0))
class env_smoketrail(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def opacity(self):
return parse_source_value(self._raw_data.get('opacity', 0.75))
@property
def spawnrate(self):
return parse_source_value(self._raw_data.get('spawnrate', 20))
@property
def lifetime(self):
return parse_source_value(self._raw_data.get('lifetime', 5.0))
@property
def startcolor(self):
return parse_int_vector(self._raw_data.get('startcolor', "192 192 192"))
@property
def endcolor(self):
return parse_int_vector(self._raw_data.get('endcolor', "160 160 160"))
@property
def emittime(self):
return parse_source_value(self._raw_data.get('emittime', 0))
@property
def minspeed(self):
return parse_source_value(self._raw_data.get('minspeed', 10))
@property
def maxspeed(self):
return parse_source_value(self._raw_data.get('maxspeed', 20))
@property
def mindirectedspeed(self):
return parse_source_value(self._raw_data.get('mindirectedspeed', 0))
@property
def maxdirectedspeed(self):
return parse_source_value(self._raw_data.get('maxdirectedspeed', 0))
@property
def startsize(self):
return parse_source_value(self._raw_data.get('startsize', 15))
@property
def endsize(self):
return parse_source_value(self._raw_data.get('endsize', 50))
@property
def spawnradius(self):
return parse_source_value(self._raw_data.get('spawnradius', 15))
@property
def firesprite(self):
return self._raw_data.get('firesprite', "sprites/firetrail.spr")
@property
def smokesprite(self):
return self._raw_data.get('smokesprite', "sprites/whitepuff.spr")
class env_physexplosion(Parentname, Targetname):
icon_sprite = "editor/env_physexplosion.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def magnitude(self):
return self._raw_data.get('magnitude', "100")
@property
def radius(self):
return self._raw_data.get('radius', "0")
@property
def targetentityname(self):
return self._raw_data.get('targetentityname', "")
@property
def inner_radius(self):
return parse_source_value(self._raw_data.get('inner_radius', 0))
class env_physimpact(Parentname, Targetname):
icon_sprite = "editor/env_physexplosion.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def angles(self):
return self._raw_data.get('angles', "0 0 0")
@property
def magnitude(self):
return parse_source_value(self._raw_data.get('magnitude', 100))
@property
def distance(self):
return parse_source_value(self._raw_data.get('distance', 0))
@property
def directionentityname(self):
return self._raw_data.get('directionentityname', "")
class env_fire(Parentname, EnableDisable, Targetname):
icon_sprite = "editor/env_fire"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 30))
@property
def firesize(self):
return parse_source_value(self._raw_data.get('firesize', 64))
@property
def fireattack(self):
return parse_source_value(self._raw_data.get('fireattack', 4))
@property
def firetype(self):
return self._raw_data.get('firetype', "0")
@property
def ignitionpoint(self):
return parse_source_value(self._raw_data.get('ignitionpoint', 32))
@property
def damagescale(self):
return parse_source_value(self._raw_data.get('damagescale', 1.0))
class env_firesource(Parentname, Targetname):
icon_sprite = "editor/env_firesource"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fireradius(self):
return parse_source_value(self._raw_data.get('fireradius', 128))
@property
def firedamage(self):
return parse_source_value(self._raw_data.get('firedamage', 10))
class env_firesensor(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fireradius(self):
return parse_source_value(self._raw_data.get('fireradius', 128))
@property
def heatlevel(self):
return parse_source_value(self._raw_data.get('heatlevel', 32))
@property
def heattime(self):
return parse_source_value(self._raw_data.get('heattime', 0))
class env_entity_igniter(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def lifetime(self):
return parse_source_value(self._raw_data.get('lifetime', 10))
class env_fog_controller(Angles, SystemLevelChoice, Targetname):
icon_sprite = "editor/fog_controller.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fogenable(self):
return self._raw_data.get('fogenable', "0")
@property
def fogblend(self):
return self._raw_data.get('fogblend', "0")
@property
def use_angles(self):
return self._raw_data.get('use_angles', "0")
@property
def fogcolor(self):
return parse_int_vector(self._raw_data.get('fogcolor', "255 255 255"))
@property
def fogcolor2(self):
return parse_int_vector(self._raw_data.get('fogcolor2', "255 255 255"))
@property
def fogdir(self):
return self._raw_data.get('fogdir', "1 0 0")
@property
def fogstart(self):
return self._raw_data.get('fogstart', "500.0")
@property
def fogend(self):
return self._raw_data.get('fogend', "2000.0")
@property
def fogmaxdensity(self):
return parse_source_value(self._raw_data.get('fogmaxdensity', 1))
@property
def foglerptime(self):
return parse_source_value(self._raw_data.get('foglerptime', 0))
@property
def farz(self):
return self._raw_data.get('farz', "-1")
@property
def HDRColorScale(self):
return parse_source_value(self._raw_data.get('hdrcolorscale', 1.0))
class postprocess_controller(Targetname):
icon_sprite = "editor/fog_controller.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def localcontraststrength(self):
return parse_source_value(self._raw_data.get('localcontraststrength', 0))
@property
def localcontrastedgestrength(self):
return parse_source_value(self._raw_data.get('localcontrastedgestrength', 0))
@property
def vignettestart(self):
return parse_source_value(self._raw_data.get('vignettestart', 1))
@property
def vignetteend(self):
return parse_source_value(self._raw_data.get('vignetteend', 2))
@property
def vignetteblurstrength(self):
return parse_source_value(self._raw_data.get('vignetteblurstrength', 0))
@property
def grainstrength(self):
return parse_source_value(self._raw_data.get('grainstrength', 1))
@property
def topvignettestrength(self):
return parse_source_value(self._raw_data.get('topvignettestrength', 1))
@property
def fadetime(self):
return parse_source_value(self._raw_data.get('fadetime', 2))
class env_steam(Parentname, Angles, Targetname):
viewport_model = "models/editor/spot_cone.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def InitialState(self):
return self._raw_data.get('initialstate', "0")
@property
def type(self):
return self._raw_data.get('type', "0")
@property
def SpreadSpeed(self):
return parse_source_value(self._raw_data.get('spreadspeed', 15))
@property
def Speed(self):
return parse_source_value(self._raw_data.get('speed', 120))
@property
def StartSize(self):
return parse_source_value(self._raw_data.get('startsize', 10))
@property
def EndSize(self):
return parse_source_value(self._raw_data.get('endsize', 25))
@property
def Rate(self):
return parse_source_value(self._raw_data.get('rate', 26))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def JetLength(self):
return parse_source_value(self._raw_data.get('jetlength', 80))
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 255))
@property
def rollspeed(self):
return parse_source_value(self._raw_data.get('rollspeed', 8))
@property
def StartNoise(self):
return self._raw_data.get('startnoise', "")
class env_laser(Parentname, RenderFxChoices, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def LaserTarget(self):
return self._raw_data.get('lasertarget', None)
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 100))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def width(self):
return parse_source_value(self._raw_data.get('width', 2))
@property
def NoiseAmplitude(self):
return parse_source_value(self._raw_data.get('noiseamplitude', 0))
@property
def texture(self):
return self._raw_data.get('texture', "sprites/laserbeam.spr")
@property
def EndSprite(self):
return self._raw_data.get('endsprite', "")
@property
def TextureScroll(self):
return parse_source_value(self._raw_data.get('texturescroll', 35))
@property
def framestart(self):
return parse_source_value(self._raw_data.get('framestart', 0))
@property
def damage(self):
return self._raw_data.get('damage', "100")
@property
def dissolvetype(self):
return self._raw_data.get('dissolvetype', "None")
class env_message(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def message(self):
return self._raw_data.get('message', None)
@property
def messagesound(self):
return self._raw_data.get('messagesound', "")
@property
def messagevolume(self):
return self._raw_data.get('messagevolume', "10")
@property
def messageattenuation(self):
return self._raw_data.get('messageattenuation', "0")
class env_hudhint(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def message(self):
return self._raw_data.get('message', "")
class env_shake(Parentname, Targetname):
icon_sprite = "editor/env_shake.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def amplitude(self):
return parse_source_value(self._raw_data.get('amplitude', 4))
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 500))
@property
def duration(self):
return parse_source_value(self._raw_data.get('duration', 1))
@property
def frequency(self):
return parse_source_value(self._raw_data.get('frequency', 2.5))
class env_viewpunch(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def punchangle(self):
return parse_float_vector(self._raw_data.get('punchangle', "0 0 90"))
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 500))
class env_rotorwash_emitter(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def altitude(self):
return parse_source_value(self._raw_data.get('altitude', 1024))
class gibshooter(gibshooterbase):
icon_sprite = "editor/gibshooter.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_shooter(RenderFields, gibshooterbase):
icon_sprite = "editor/env_shooter.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def shootmodel(self):
return self._raw_data.get('shootmodel', "")
@property
def shootsounds(self):
return self._raw_data.get('shootsounds', "-1")
@property
def simulation(self):
return self._raw_data.get('simulation', "0")
@property
def skin(self):
return parse_source_value(self._raw_data.get('skin', 0))
@property
def nogibshadows(self):
return self._raw_data.get('nogibshadows', "0")
@property
def gibgravityscale(self):
return parse_source_value(self._raw_data.get('gibgravityscale', 1))
@property
def massoverride(self):
return parse_source_value(self._raw_data.get('massoverride', 0))
class env_rotorshooter(RenderFields, gibshooterbase):
icon_sprite = "editor/env_shooter.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def shootmodel(self):
return self._raw_data.get('shootmodel', "")
@property
def shootsounds(self):
return self._raw_data.get('shootsounds', "-1")
@property
def simulation(self):
return self._raw_data.get('simulation', "0")
@property
def skin(self):
return parse_source_value(self._raw_data.get('skin', 0))
@property
def rotortime(self):
return parse_source_value(self._raw_data.get('rotortime', 1))
@property
def rotortimevariance(self):
return parse_source_value(self._raw_data.get('rotortimevariance', 0.3))
class env_soundscape_proxy(Parentname, Targetname):
icon_sprite = "editor/env_soundscape.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MainSoundscapeName(self):
return self._raw_data.get('mainsoundscapename', "")
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 128))
class env_soundscape(Parentname, EnableDisable, Targetname):
icon_sprite = "editor/env_soundscape.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 128))
@property
def soundscape(self):
return self._raw_data.get('soundscape', "Nothing")
@property
def position0(self):
return self._raw_data.get('position0', "")
@property
def position1(self):
return self._raw_data.get('position1', "")
@property
def position2(self):
return self._raw_data.get('position2', "")
@property
def position3(self):
return self._raw_data.get('position3', "")
@property
def position4(self):
return self._raw_data.get('position4', "")
@property
def position5(self):
return self._raw_data.get('position5', "")
@property
def position6(self):
return self._raw_data.get('position6', "")
@property
def position7(self):
return self._raw_data.get('position7', "")
class env_soundscape_triggerable(env_soundscape):
icon_sprite = "editor/env_soundscape.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_spark(Parentname, Angles, Targetname):
icon_sprite = "editor/env_spark.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MaxDelay(self):
return self._raw_data.get('maxdelay', "0")
@property
def Magnitude(self):
return self._raw_data.get('magnitude', "1")
@property
def TrailLength(self):
return self._raw_data.get('traillength', "1")
class env_sprite(RenderFields, Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def framerate(self):
return self._raw_data.get('framerate', "10.0")
@property
def model(self):
return self._raw_data.get('model', "sprites/glow01.spr")
@property
def scale(self):
return self._raw_data.get('scale', "")
@property
def GlowProxySize(self):
return parse_source_value(self._raw_data.get('glowproxysize', 2.0))
@property
def HDRColorScale(self):
return parse_source_value(self._raw_data.get('hdrcolorscale', 0.7))
class env_sprite_oriented(Angles, env_sprite):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_wind(Angles, Targetname):
icon_sprite = "editor/env_wind.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def minwind(self):
return parse_source_value(self._raw_data.get('minwind', 20))
@property
def maxwind(self):
return parse_source_value(self._raw_data.get('maxwind', 50))
@property
def windradius(self):
return parse_source_value(self._raw_data.get('windradius', -1))
@property
def mingust(self):
return parse_source_value(self._raw_data.get('mingust', 100))
@property
def maxgust(self):
return parse_source_value(self._raw_data.get('maxgust', 250))
@property
def mingustdelay(self):
return parse_source_value(self._raw_data.get('mingustdelay', 10))
@property
def maxgustdelay(self):
return parse_source_value(self._raw_data.get('maxgustdelay', 20))
@property
def gustduration(self):
return parse_source_value(self._raw_data.get('gustduration', 5))
@property
def gustdirchange(self):
return parse_source_value(self._raw_data.get('gustdirchange', 20))
class sky_camera(Angles):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def scale(self):
return parse_source_value(self._raw_data.get('scale', 16))
@property
def fogenable(self):
return self._raw_data.get('fogenable', "0")
@property
def fogblend(self):
return self._raw_data.get('fogblend', "0")
@property
def use_angles(self):
return self._raw_data.get('use_angles', "0")
@property
def clip_3D_skybox_near_to_world_far(self):
return self._raw_data.get('clip_3d_skybox_near_to_world_far', "0")
@property
def clip_3D_skybox_near_to_world_far_offset(self):
return self._raw_data.get('clip_3d_skybox_near_to_world_far_offset', "0.0")
@property
def fogcolor(self):
return parse_int_vector(self._raw_data.get('fogcolor', "255 255 255"))
@property
def fogcolor2(self):
return parse_int_vector(self._raw_data.get('fogcolor2', "255 255 255"))
@property
def fogdir(self):
return self._raw_data.get('fogdir', "1 0 0")
@property
def fogstart(self):
return self._raw_data.get('fogstart', "500.0")
@property
def fogend(self):
return self._raw_data.get('fogend', "2000.0")
@property
def fogmaxdensity(self):
return parse_source_value(self._raw_data.get('fogmaxdensity', 1))
@property
def HDRColorScale(self):
return parse_source_value(self._raw_data.get('hdrcolorscale', 1.0))
class BaseSpeaker(ResponseContext, Targetname):
@property
def delaymin(self):
return self._raw_data.get('delaymin', "15")
@property
def delaymax(self):
return self._raw_data.get('delaymax', "135")
@property
def rulescript(self):
return self._raw_data.get('rulescript', "")
@property
def concept(self):
return self._raw_data.get('concept', "")
class game_weapon_manager(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def weaponname(self):
return self._raw_data.get('weaponname', "")
@property
def maxpieces(self):
return parse_source_value(self._raw_data.get('maxpieces', 0))
@property
def ammomod(self):
return parse_source_value(self._raw_data.get('ammomod', 1))
class game_end(Targetname):
icon_sprite = "editor/game_end.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def master(self):
return self._raw_data.get('master', None)
class game_player_equip(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def master(self):
return self._raw_data.get('master', None)
class game_player_team(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def master(self):
return self._raw_data.get('master', None)
class game_score(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def points(self):
return parse_source_value(self._raw_data.get('points', 1))
@property
def master(self):
return self._raw_data.get('master', None)
class game_text(Targetname):
icon_sprite = "editor/game_text.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def message(self):
return self._raw_data.get('message', "")
@property
def x(self):
return self._raw_data.get('x', "-1")
@property
def y(self):
return self._raw_data.get('y', "-1")
@property
def effect(self):
return self._raw_data.get('effect', "0")
@property
def color(self):
return parse_int_vector(self._raw_data.get('color', "100 100 100"))
@property
def color2(self):
return parse_int_vector(self._raw_data.get('color2', "240 110 0"))
@property
def fadein(self):
return self._raw_data.get('fadein', "1.5")
@property
def fadeout(self):
return self._raw_data.get('fadeout', "0.5")
@property
def holdtime(self):
return self._raw_data.get('holdtime', "1.2")
@property
def fxtime(self):
return self._raw_data.get('fxtime', "0.25")
@property
def channel(self):
return self._raw_data.get('channel', "1")
@property
def master(self):
return self._raw_data.get('master', None)
class point_enable_motion_fixup(Parentname, Angles):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class point_message(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def message(self):
return self._raw_data.get('message', None)
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 128))
@property
def developeronly(self):
return self._raw_data.get('developeronly', "0")
class point_spotlight(RenderFields, Parentname, Angles, Targetname):
model_ = "models/editor/cone_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def spotlightlength(self):
return parse_source_value(self._raw_data.get('spotlightlength', 500))
@property
def spotlightwidth(self):
return parse_source_value(self._raw_data.get('spotlightwidth', 50))
@property
def HaloScale(self):
return parse_source_value(self._raw_data.get('haloscale', 60))
@property
def HDRColorScale(self):
return parse_source_value(self._raw_data.get('hdrcolorscale', 0.7))
class point_tesla(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def m_SourceEntityName(self):
return self._raw_data.get('m_sourceentityname', "")
@property
def m_SoundName(self):
return self._raw_data.get('m_soundname', "DoSpark")
@property
def texture(self):
return self._raw_data.get('texture', "sprites/physbeam.vmt")
@property
def m_Color(self):
return parse_int_vector(self._raw_data.get('m_color', "255 255 255"))
@property
def m_flRadius(self):
return parse_source_value(self._raw_data.get('m_flradius', 200))
@property
def beamcount_min(self):
return parse_source_value(self._raw_data.get('beamcount_min', 6))
@property
def beamcount_max(self):
return parse_source_value(self._raw_data.get('beamcount_max', 8))
@property
def thick_min(self):
return self._raw_data.get('thick_min', "4")
@property
def thick_max(self):
return self._raw_data.get('thick_max', "5")
@property
def lifetime_min(self):
return self._raw_data.get('lifetime_min', "0.3")
@property
def lifetime_max(self):
return self._raw_data.get('lifetime_max', "0.3")
@property
def interval_min(self):
return self._raw_data.get('interval_min', "0.5")
@property
def interval_max(self):
return self._raw_data.get('interval_max', "2")
class point_clientcommand(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class point_servercommand(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class point_broadcastclientcommand(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class point_bonusmaps_accessor(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def filename(self):
return self._raw_data.get('filename', "")
@property
def mapname(self):
return self._raw_data.get('mapname', "")
class game_ui(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def FieldOfView(self):
return parse_source_value(self._raw_data.get('fieldofview', -1.0))
class point_entity_finder(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def filtername(self):
return self._raw_data.get('filtername', None)
@property
def referencename(self):
return self._raw_data.get('referencename', "")
@property
def Method(self):
return self._raw_data.get('method', "0")
class game_zone_player(Parentname, Targetname):
pass
class infodecal(Targetname):
model_ = "models/editor/axis_helper_thick.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def texture(self):
return self._raw_data.get('texture', None)
@property
def LowPriority(self):
return self._raw_data.get('lowpriority', "0")
@property
def ApplyEntity(self):
return self._raw_data.get('applyentity', None)
class info_projecteddecal(Angles, Targetname):
model_ = "models/editor/axis_helper_thick.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def texture(self):
return self._raw_data.get('texture', None)
@property
def Distance(self):
return parse_source_value(self._raw_data.get('distance', 64))
class info_no_dynamic_shadow(Base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def sides(self):
return self._raw_data.get('sides', None)
class info_player_start(PlayerClass, Angles):
model_ = "models/editor/playerstart.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_overlay(SystemLevelChoice, Targetname):
model_ = "models/editor/overlay_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def material(self):
return self._raw_data.get('material', None)
@property
def sides(self):
return self._raw_data.get('sides', None)
@property
def RenderOrder(self):
return parse_source_value(self._raw_data.get('renderorder', 0))
@property
def StartU(self):
return parse_source_value(self._raw_data.get('startu', 0.0))
@property
def EndU(self):
return parse_source_value(self._raw_data.get('endu', 1.0))
@property
def StartV(self):
return parse_source_value(self._raw_data.get('startv', 0.0))
@property
def EndV(self):
return parse_source_value(self._raw_data.get('endv', 1.0))
@property
def BasisOrigin(self):
return parse_float_vector(self._raw_data.get('basisorigin', None))
@property
def BasisU(self):
return parse_float_vector(self._raw_data.get('basisu', None))
@property
def BasisV(self):
return parse_float_vector(self._raw_data.get('basisv', None))
@property
def BasisNormal(self):
return parse_float_vector(self._raw_data.get('basisnormal', None))
@property
def uv0(self):
return parse_float_vector(self._raw_data.get('uv0', None))
@property
def uv1(self):
return parse_float_vector(self._raw_data.get('uv1', None))
@property
def uv2(self):
return parse_float_vector(self._raw_data.get('uv2', None))
@property
def uv3(self):
return parse_float_vector(self._raw_data.get('uv3', None))
@property
def fademindist(self):
return parse_source_value(self._raw_data.get('fademindist', -1))
@property
def fademaxdist(self):
return parse_source_value(self._raw_data.get('fademaxdist', 0))
class info_overlay_transition(Base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def material(self):
return self._raw_data.get('material', None)
@property
def sides(self):
return self._raw_data.get('sides', None)
@property
def sides2(self):
return self._raw_data.get('sides2', None)
@property
def LengthTexcoordStart(self):
return parse_source_value(self._raw_data.get('lengthtexcoordstart', 0.0))
@property
def LengthTexcoordEnd(self):
return parse_source_value(self._raw_data.get('lengthtexcoordend', 1.0))
@property
def WidthTexcoordStart(self):
return parse_source_value(self._raw_data.get('widthtexcoordstart', 0.0))
@property
def WidthTexcoordEnd(self):
return parse_source_value(self._raw_data.get('widthtexcoordend', 1.0))
@property
def Width1(self):
return parse_source_value(self._raw_data.get('width1', 25.0))
@property
def Width2(self):
return parse_source_value(self._raw_data.get('width2', 25.0))
@property
def DebugDraw(self):
return parse_source_value(self._raw_data.get('debugdraw', 0))
class info_intermission(Base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
class info_landmark(Targetname):
icon_sprite = "editor/info_landmark"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_null(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_target(Parentname, Angles, Targetname):
icon_sprite = "editor/info_target.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_particle_target(Parentname, Angles, Targetname):
model_ = "models/editor/cone_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
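# info_particle_system exposes up to 63 control points (cpoint1..cpoint63,
# plus cpoint1_parent..cpoint7_parent); each cpointN names a target entity
# used as the corresponding control point of the particle effect.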
class info_particle_system(Parentname, Angles, Targetname):
model_ = "models/editor/cone_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def effect_name(self):
return self._raw_data.get('effect_name', None)
@property
def start_active(self):
return self._raw_data.get('start_active', "0")
@property
def render_in_front(self):
return self._raw_data.get('render_in_front', "0")
@property
def cpoint1(self):
return self._raw_data.get('cpoint1', None)
@property
def cpoint2(self):
return self._raw_data.get('cpoint2', None)
@property
def cpoint3(self):
return self._raw_data.get('cpoint3', None)
@property
def cpoint4(self):
return self._raw_data.get('cpoint4', None)
@property
def cpoint5(self):
return self._raw_data.get('cpoint5', None)
@property
def cpoint6(self):
return self._raw_data.get('cpoint6', None)
@property
def cpoint7(self):
return self._raw_data.get('cpoint7', None)
@property
def cpoint8(self):
return self._raw_data.get('cpoint8', None)
@property
def cpoint9(self):
return self._raw_data.get('cpoint9', None)
@property
def cpoint10(self):
return self._raw_data.get('cpoint10', None)
@property
def cpoint11(self):
return self._raw_data.get('cpoint11', None)
@property
def cpoint12(self):
return self._raw_data.get('cpoint12', None)
@property
def cpoint13(self):
return self._raw_data.get('cpoint13', None)
@property
def cpoint14(self):
return self._raw_data.get('cpoint14', None)
@property
def cpoint15(self):
return self._raw_data.get('cpoint15', None)
@property
def cpoint16(self):
return self._raw_data.get('cpoint16', None)
@property
def cpoint17(self):
return self._raw_data.get('cpoint17', None)
@property
def cpoint18(self):
return self._raw_data.get('cpoint18', None)
@property
def cpoint19(self):
return self._raw_data.get('cpoint19', None)
@property
def cpoint20(self):
return self._raw_data.get('cpoint20', None)
@property
def cpoint21(self):
return self._raw_data.get('cpoint21', None)
@property
def cpoint22(self):
return self._raw_data.get('cpoint22', None)
@property
def cpoint23(self):
return self._raw_data.get('cpoint23', None)
@property
def cpoint24(self):
return self._raw_data.get('cpoint24', None)
@property
def cpoint25(self):
return self._raw_data.get('cpoint25', None)
@property
def cpoint26(self):
return self._raw_data.get('cpoint26', None)
@property
def cpoint27(self):
return self._raw_data.get('cpoint27', None)
@property
def cpoint28(self):
return self._raw_data.get('cpoint28', None)
@property
def cpoint29(self):
return self._raw_data.get('cpoint29', None)
@property
def cpoint30(self):
return self._raw_data.get('cpoint30', None)
@property
def cpoint31(self):
return self._raw_data.get('cpoint31', None)
@property
def cpoint32(self):
return self._raw_data.get('cpoint32', None)
@property
def cpoint33(self):
return self._raw_data.get('cpoint33', None)
@property
def cpoint34(self):
return self._raw_data.get('cpoint34', None)
@property
def cpoint35(self):
return self._raw_data.get('cpoint35', None)
@property
def cpoint36(self):
return self._raw_data.get('cpoint36', None)
@property
def cpoint37(self):
return self._raw_data.get('cpoint37', None)
@property
def cpoint38(self):
return self._raw_data.get('cpoint38', None)
@property
def cpoint39(self):
return self._raw_data.get('cpoint39', None)
@property
def cpoint40(self):
return self._raw_data.get('cpoint40', None)
@property
def cpoint41(self):
return self._raw_data.get('cpoint41', None)
@property
def cpoint42(self):
return self._raw_data.get('cpoint42', None)
@property
def cpoint43(self):
return self._raw_data.get('cpoint43', None)
@property
def cpoint44(self):
return self._raw_data.get('cpoint44', None)
@property
def cpoint45(self):
return self._raw_data.get('cpoint45', None)
@property
def cpoint46(self):
return self._raw_data.get('cpoint46', None)
@property
def cpoint47(self):
return self._raw_data.get('cpoint47', None)
@property
def cpoint48(self):
return self._raw_data.get('cpoint48', None)
@property
def cpoint49(self):
return self._raw_data.get('cpoint49', None)
@property
def cpoint50(self):
return self._raw_data.get('cpoint50', None)
@property
def cpoint51(self):
return self._raw_data.get('cpoint51', None)
@property
def cpoint52(self):
return self._raw_data.get('cpoint52', None)
@property
def cpoint53(self):
return self._raw_data.get('cpoint53', None)
@property
def cpoint54(self):
return self._raw_data.get('cpoint54', None)
@property
def cpoint55(self):
return self._raw_data.get('cpoint55', None)
@property
def cpoint56(self):
return self._raw_data.get('cpoint56', None)
@property
def cpoint57(self):
return self._raw_data.get('cpoint57', None)
@property
def cpoint58(self):
return self._raw_data.get('cpoint58', None)
@property
def cpoint59(self):
return self._raw_data.get('cpoint59', None)
@property
def cpoint60(self):
return self._raw_data.get('cpoint60', None)
@property
def cpoint61(self):
return self._raw_data.get('cpoint61', None)
@property
def cpoint62(self):
return self._raw_data.get('cpoint62', None)
@property
def cpoint63(self):
return self._raw_data.get('cpoint63', None)
@property
def cpoint1_parent(self):
return parse_source_value(self._raw_data.get('cpoint1_parent', 0))
@property
def cpoint2_parent(self):
return parse_source_value(self._raw_data.get('cpoint2_parent', 0))
@property
def cpoint3_parent(self):
return parse_source_value(self._raw_data.get('cpoint3_parent', 0))
@property
def cpoint4_parent(self):
return parse_source_value(self._raw_data.get('cpoint4_parent', 0))
@property
def cpoint5_parent(self):
return parse_source_value(self._raw_data.get('cpoint5_parent', 0))
@property
def cpoint6_parent(self):
return parse_source_value(self._raw_data.get('cpoint6_parent', 0))
@property
def cpoint7_parent(self):
return parse_source_value(self._raw_data.get('cpoint7_parent', 0))
class phys_ragdollmagnet(Parentname, Angles, EnableDisable, Targetname):
icon_sprite = "editor/info_target.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def axis(self):
return self._raw_data.get('axis', None)
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 512))
@property
def force(self):
return parse_source_value(self._raw_data.get('force', 5000))
@property
def target(self):
return self._raw_data.get('target', "")
class info_lighting(Targetname):
icon_sprite = "editor/info_lighting.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_teleport_destination(PlayerClass, Parentname, Angles, Targetname):
model_ = "models/editor/playerstart.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_node(Node):
model_ = "models/editor/ground_node.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_node_hint(Angles, HintNode, Targetname):
model_ = "models/editor/ground_node_hint.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_node_air(Node):
model_ = "models/editor/air_node.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def nodeheight(self):
return parse_source_value(self._raw_data.get('nodeheight', 0))
class info_node_air_hint(Angles, HintNode, Targetname):
model_ = "models/editor/air_node_hint.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def nodeheight(self):
return parse_source_value(self._raw_data.get('nodeheight', 0))
class info_hint(Angles, HintNode, Targetname):
model_ = "models/editor/node_hint.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_node_link(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def StartNode(self):
return parse_source_value(self._raw_data.get('startnode', None))
@property
def EndNode(self):
return parse_source_value(self._raw_data.get('endnode', None))
@property
def initialstate(self):
return self._raw_data.get('initialstate', "1")
@property
def linktype(self):
return self._raw_data.get('linktype', "1")
@property
def AllowUse(self):
return self._raw_data.get('allowuse', None)
@property
def InvertAllow(self):
return self._raw_data.get('invertallow', "0")
class info_node_link_controller(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def mins(self):
return parse_float_vector(self._raw_data.get('mins', "-8 -32 -36"))
@property
def maxs(self):
return parse_float_vector(self._raw_data.get('maxs', "8 32 36"))
@property
def initialstate(self):
return self._raw_data.get('initialstate', "1")
@property
def useairlinkradius(self):
return self._raw_data.get('useairlinkradius', "0")
@property
def AllowUse(self):
return self._raw_data.get('allowuse', None)
@property
def InvertAllow(self):
return self._raw_data.get('invertallow', "0")
class info_radial_link_controller(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 120))
class info_node_climb(Angles, HintNode, Targetname):
model_ = "models/editor/climb_node.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class light(Light, Targetname):
icon_sprite = "editor/light.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def _distance(self):
return parse_source_value(self._raw_data.get('_distance', 0))
class light_environment(Angles):
icon_sprite = "editor/light_env.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def pitch(self):
return parse_source_value(self._raw_data.get('pitch', 0))
@property
def _light(self):
return parse_int_vector(self._raw_data.get('_light', "255 255 255 200"))
@property
def _ambient(self):
return parse_int_vector(self._raw_data.get('_ambient', "255 255 255 20"))
@property
def _lightHDR(self):
return parse_int_vector(self._raw_data.get('_lighthdr', "-1 -1 -1 1"))
@property
def _lightscaleHDR(self):
return parse_source_value(self._raw_data.get('_lightscalehdr', 0.7))
@property
def _ambientHDR(self):
return parse_int_vector(self._raw_data.get('_ambienthdr', "-1 -1 -1 1"))
@property
def _AmbientScaleHDR(self):
return parse_source_value(self._raw_data.get('_ambientscalehdr', 0.7))
@property
def SunSpreadAngle(self):
return parse_source_value(self._raw_data.get('sunspreadangle', 0))
class light_directional(Angles):
icon_sprite = "editor/light_env.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def pitch(self):
return parse_source_value(self._raw_data.get('pitch', 0))
@property
def _light(self):
return parse_int_vector(self._raw_data.get('_light', "255 255 255 200"))
@property
def _lightHDR(self):
return parse_int_vector(self._raw_data.get('_lighthdr', "-1 -1 -1 1"))
@property
def _lightscaleHDR(self):
return parse_source_value(self._raw_data.get('_lightscalehdr', 0.7))
@property
def SunSpreadAngle(self):
return parse_source_value(self._raw_data.get('sunspreadangle', 0))
class light_spot(Light, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def _inner_cone(self):
return parse_source_value(self._raw_data.get('_inner_cone', 30))
@property
def _cone(self):
return parse_source_value(self._raw_data.get('_cone', 45))
@property
def _exponent(self):
return parse_source_value(self._raw_data.get('_exponent', 1))
@property
def _distance(self):
return parse_source_value(self._raw_data.get('_distance', 0))
@property
def pitch(self):
return parse_source_value(self._raw_data.get('pitch', -90))
class light_dynamic(Parentname, Angles, Targetname):
icon_sprite = "editor/light.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def _light(self):
return parse_int_vector(self._raw_data.get('_light', "255 255 255 200"))
@property
def brightness(self):
return parse_source_value(self._raw_data.get('brightness', 0))
@property
def _inner_cone(self):
return parse_source_value(self._raw_data.get('_inner_cone', 30))
@property
def _cone(self):
return parse_source_value(self._raw_data.get('_cone', 45))
@property
def pitch(self):
return parse_source_value(self._raw_data.get('pitch', -90))
@property
def distance(self):
return parse_source_value(self._raw_data.get('distance', 120))
@property
def spotlight_radius(self):
return parse_source_value(self._raw_data.get('spotlight_radius', 80))
@property
def style(self):
return self._raw_data.get('style', "0")
class shadow_control(Targetname):
icon_sprite = "editor/shadow_control.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def angles(self):
return self._raw_data.get('angles', "80 30 0")
@property
def color(self):
return parse_int_vector(self._raw_data.get('color', "128 128 128"))
@property
def distance(self):
return parse_source_value(self._raw_data.get('distance', 75))
@property
def disableallshadows(self):
return self._raw_data.get('disableallshadows', "0")
@property
def enableshadowsfromlocallights(self):
return self._raw_data.get('enableshadowsfromlocallights', "0")
class color_correction(EnableDisable, Targetname):
icon_sprite = "editor/color_correction.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def minfalloff(self):
return parse_source_value(self._raw_data.get('minfalloff', 0.0))
@property
def maxfalloff(self):
return parse_source_value(self._raw_data.get('maxfalloff', 200.0))
@property
def maxweight(self):
return parse_source_value(self._raw_data.get('maxweight', 1.0))
@property
def filename(self):
return self._raw_data.get('filename', "")
@property
def fadeInDuration(self):
return parse_source_value(self._raw_data.get('fadeinduration', 0.0))
@property
def fadeOutDuration(self):
return parse_source_value(self._raw_data.get('fadeoutduration', 0.0))
@property
def exclusive(self):
return self._raw_data.get('exclusive', "0")
class color_correction_volume(EnableDisable, Targetname):
@property
def fadeDuration(self):
return parse_source_value(self._raw_data.get('fadeduration', 10.0))
@property
def maxweight(self):
return parse_source_value(self._raw_data.get('maxweight', 1.0))
@property
def filename(self):
return self._raw_data.get('filename', "")
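# KeyFrame and Mover are property-only mixins for the keyframed movement
# entities defined further below (keyframe_track, move_keyframed, move_track,
# keyframe_rope, move_rope); they are not placeable entities themselves.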
class KeyFrame(Base):
@property
def NextKey(self):
return self._raw_data.get('nextkey', None)
@property
def MoveSpeed(self):
return parse_source_value(self._raw_data.get('movespeed', 64))
class Mover(Base):
@property
def PositionInterpolator(self):
return self._raw_data.get('positioninterpolator', "0")
class func_movelinear(RenderFields, Parentname, Origin, Targetname):
@property
def movedir(self):
return parse_float_vector(self._raw_data.get('movedir', "0 0 0"))
@property
def startposition(self):
return parse_source_value(self._raw_data.get('startposition', 0))
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 100))
@property
def movedistance(self):
return parse_source_value(self._raw_data.get('movedistance', 100))
@property
def blockdamage(self):
return parse_source_value(self._raw_data.get('blockdamage', 0))
@property
def startsound(self):
return self._raw_data.get('startsound', None)
@property
def stopsound(self):
return self._raw_data.get('stopsound', None)
class func_water_analog(Parentname, Origin, Targetname):
@property
def movedir(self):
return parse_float_vector(self._raw_data.get('movedir', "0 0 0"))
@property
def startposition(self):
return parse_source_value(self._raw_data.get('startposition', 0))
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 100))
@property
def movedistance(self):
return parse_source_value(self._raw_data.get('movedistance', 100))
@property
def startsound(self):
return self._raw_data.get('startsound', None)
@property
def stopsound(self):
return self._raw_data.get('stopsound', None)
@property
def WaveHeight(self):
return self._raw_data.get('waveheight', "3.0")
class func_rotating(Parentname, Shadow, Angles, Origin, RenderFields, Targetname):
@property
def maxspeed(self):
return parse_source_value(self._raw_data.get('maxspeed', 100))
@property
def fanfriction(self):
return parse_source_value(self._raw_data.get('fanfriction', 20))
@property
def message(self):
return self._raw_data.get('message', None)
@property
def volume(self):
return parse_source_value(self._raw_data.get('volume', 10))
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
@property
def dmg(self):
return parse_source_value(self._raw_data.get('dmg', 0))
@property
def solidbsp(self):
return self._raw_data.get('solidbsp', "0")
class func_platrot(Parentname, Shadow, Angles, BasePlat, Origin, RenderFields, Targetname):
@property
def noise1(self):
return self._raw_data.get('noise1', None)
@property
def noise2(self):
return self._raw_data.get('noise2', None)
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 50))
@property
def height(self):
return parse_source_value(self._raw_data.get('height', 0))
@property
def rotation(self):
return parse_source_value(self._raw_data.get('rotation', 0))
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class keyframe_track(KeyFrame, Parentname, Angles, Targetname):
pass
class move_keyframed(KeyFrame, Parentname, Mover, Targetname):
pass
class move_track(KeyFrame, Parentname, Mover, Targetname):
@property
def WheelBaseLength(self):
return parse_source_value(self._raw_data.get('wheelbaselength', 50))
@property
def Damage(self):
return parse_source_value(self._raw_data.get('damage', 0))
@property
def NoRotate(self):
return self._raw_data.get('norotate', "0")
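# RopeKeyFrame carries the shared rope appearance/physics key-values
# (slack, subdivisions, width, wind, breakability) used by keyframe_rope
# and move_rope below.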
class RopeKeyFrame(SystemLevelChoice):
@property
def Slack(self):
return parse_source_value(self._raw_data.get('slack', 25))
@property
def Type(self):
return self._raw_data.get('type', "0")
@property
def Subdiv(self):
return parse_source_value(self._raw_data.get('subdiv', 2))
@property
def Barbed(self):
return self._raw_data.get('barbed', "0")
@property
def Width(self):
return self._raw_data.get('width', "2")
@property
def TextureScale(self):
return self._raw_data.get('texturescale', "1")
@property
def Collide(self):
return self._raw_data.get('collide', "0")
@property
def Dangling(self):
return self._raw_data.get('dangling', "0")
@property
def Breakable(self):
return self._raw_data.get('breakable', "0")
@property
def UseWind(self):
return self._raw_data.get('usewind', "0")
@property
def RopeMaterial(self):
return self._raw_data.get('ropematerial', "cable/cable.vmt")
class keyframe_rope(KeyFrame, Parentname, RopeKeyFrame, Targetname):
model_ = "models/editor/axis_helper_thick.mdl"
pass
class move_rope(KeyFrame, Parentname, RopeKeyFrame, Targetname):
model_ = "models/editor/axis_helper.mdl"
@property
def PositionInterpolator(self):
return self._raw_data.get('positioninterpolator', "2")
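# Button is an empty marker base; the actual key-values live on func_button
# and func_rot_button below.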
class Button(Base):
pass
class func_button(Parentname, DamageFilter, Button, Origin, RenderFields, Targetname):
@property
def movedir(self):
return parse_float_vector(self._raw_data.get('movedir', "0 0 0"))
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 5))
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 0))
@property
def lip(self):
return parse_source_value(self._raw_data.get('lip', 0))
@property
def master(self):
return self._raw_data.get('master', None)
@property
def glow(self):
return self._raw_data.get('glow', None)
@property
def sounds(self):
return self._raw_data.get('sounds', "0")
@property
def wait(self):
return parse_source_value(self._raw_data.get('wait', 3))
@property
def locked_sound(self):
return self._raw_data.get('locked_sound', "0")
@property
def unlocked_sound(self):
return self._raw_data.get('unlocked_sound', "0")
@property
def locked_sentence(self):
return self._raw_data.get('locked_sentence', "0")
@property
def unlocked_sentence(self):
return self._raw_data.get('unlocked_sentence', "0")
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class func_rot_button(Parentname, Button, Angles, Global, EnableDisable, Origin, Targetname):
@property
def master(self):
return self._raw_data.get('master', None)
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 50))
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 0))
@property
def sounds(self):
return self._raw_data.get('sounds', "21")
@property
def wait(self):
return parse_source_value(self._raw_data.get('wait', 3))
@property
def distance(self):
return parse_source_value(self._raw_data.get('distance', 90))
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class momentary_rot_button(Parentname, Angles, Origin, RenderFields, Targetname):
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 50))
@property
def master(self):
return self._raw_data.get('master', None)
@property
def glow(self):
return self._raw_data.get('glow', None)
@property
def sounds(self):
return self._raw_data.get('sounds', "0")
@property
def distance(self):
return parse_source_value(self._raw_data.get('distance', 90))
@property
def returnspeed(self):
return parse_source_value(self._raw_data.get('returnspeed', 0))
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
@property
def startposition(self):
return parse_source_value(self._raw_data.get('startposition', 0))
@property
def startdirection(self):
return self._raw_data.get('startdirection', "Forward")
@property
def solidbsp(self):
return self._raw_data.get('solidbsp', "0")
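# Door collects the key-values common to the brush door entities; func_door
# and func_door_rotating below only add their movement-specific keys.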
class Door(Parentname, Shadow, Global, RenderFields, Targetname):
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 100))
@property
def master(self):
return self._raw_data.get('master', None)
@property
def noise1(self):
return self._raw_data.get('noise1', None)
@property
def noise2(self):
return self._raw_data.get('noise2', None)
@property
def startclosesound(self):
return self._raw_data.get('startclosesound', None)
@property
def closesound(self):
return self._raw_data.get('closesound', None)
@property
def wait(self):
return parse_source_value(self._raw_data.get('wait', 4))
@property
def lip(self):
return parse_source_value(self._raw_data.get('lip', 0))
@property
def dmg(self):
return parse_source_value(self._raw_data.get('dmg', 0))
@property
def forceclosed(self):
return self._raw_data.get('forceclosed', "0")
@property
def ignoredebris(self):
return self._raw_data.get('ignoredebris', "0")
@property
def message(self):
return self._raw_data.get('message', None)
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 0))
@property
def locked_sound(self):
return self._raw_data.get('locked_sound', None)
@property
def unlocked_sound(self):
return self._raw_data.get('unlocked_sound', None)
@property
def spawnpos(self):
return self._raw_data.get('spawnpos', "0")
@property
def locked_sentence(self):
return self._raw_data.get('locked_sentence', "0")
@property
def unlocked_sentence(self):
return self._raw_data.get('unlocked_sentence', "0")
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
@property
def loopmovesound(self):
return self._raw_data.get('loopmovesound', "0")
class func_door(Door, Origin):
@property
def movedir(self):
return parse_float_vector(self._raw_data.get('movedir', "0 0 0"))
@property
def filtername(self):
return self._raw_data.get('filtername', None)
class func_door_rotating(Door, Angles, Origin):
@property
def distance(self):
return parse_source_value(self._raw_data.get('distance', 90))
@property
def always_fire_blocked_outputs(self):
return self._raw_data.get('always_fire_blocked_outputs', "0")
@property
def solidbsp(self):
return self._raw_data.get('solidbsp', "0")
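# BaseFadeProp supplies the distance-fade key-values (fademindist,
# fademaxdist, fadescale) shared by the prop_* entities that follow.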
class BaseFadeProp(Base):
@property
def fademindist(self):
return parse_source_value(self._raw_data.get('fademindist', -1))
@property
def fademaxdist(self):
return parse_source_value(self._raw_data.get('fademaxdist', 0))
@property
def fadescale(self):
return parse_source_value(self._raw_data.get('fadescale', 1))
class prop_door_rotating(Parentname, Glow, Studiomodel, Angles, Global, BaseFadeProp, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def slavename(self):
return self._raw_data.get('slavename', None)
@property
def hardware(self):
return self._raw_data.get('hardware', "1")
@property
def ajarangles(self):
return parse_float_vector(self._raw_data.get('ajarangles', "0 0 0"))
@property
def spawnpos(self):
return self._raw_data.get('spawnpos', "0")
@property
def axis(self):
return self._raw_data.get('axis', None)
@property
def distance(self):
return parse_source_value(self._raw_data.get('distance', 90))
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 200))
@property
def soundopenoverride(self):
return self._raw_data.get('soundopenoverride', None)
@property
def soundcloseoverride(self):
return self._raw_data.get('soundcloseoverride', None)
@property
def soundmoveoverride(self):
return self._raw_data.get('soundmoveoverride', None)
@property
def returndelay(self):
return parse_source_value(self._raw_data.get('returndelay', -1))
@property
def dmg(self):
return parse_source_value(self._raw_data.get('dmg', 0))
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 0))
@property
def soundlockedoverride(self):
return self._raw_data.get('soundlockedoverride', None)
@property
def soundunlockedoverride(self):
return self._raw_data.get('soundunlockedoverride', None)
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def forceclosed(self):
return self._raw_data.get('forceclosed', "0")
@property
def opendir(self):
return self._raw_data.get('opendir', "0")
class prop_wall_breakable(Parentname, Studiomodel, Angles, Global, BaseFadeProp, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_cubemap(Base):
icon_sprite = "editor/env_cubemap.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def cubemapsize(self):
return self._raw_data.get('cubemapsize', "0")
@property
def sides(self):
return self._raw_data.get('sides', None)
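# BModelParticleSpawner holds the spawn rate / lifetime / speed key-values
# shared by the brush particle emitters func_dustmotes and func_dustcloud.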
class BModelParticleSpawner(Base):
@property
def StartDisabled(self):
return self._raw_data.get('startdisabled', "0")
@property
def Color(self):
return parse_int_vector(self._raw_data.get('color', "255 255 255"))
@property
def SpawnRate(self):
return parse_source_value(self._raw_data.get('spawnrate', 40))
@property
def SpeedMax(self):
return self._raw_data.get('speedmax', "13")
@property
def LifetimeMin(self):
return self._raw_data.get('lifetimemin', "3")
@property
def LifetimeMax(self):
return self._raw_data.get('lifetimemax', "5")
@property
def DistMax(self):
return parse_source_value(self._raw_data.get('distmax', 1024))
@property
def Frozen(self):
return self._raw_data.get('frozen', "0")
class func_dustmotes(BModelParticleSpawner, Targetname):
@property
def SizeMin(self):
return self._raw_data.get('sizemin', "10")
@property
def SizeMax(self):
return self._raw_data.get('sizemax', "20")
@property
def Alpha(self):
return parse_source_value(self._raw_data.get('alpha', 255))
class func_smokevolume(Targetname):
@property
def Color1(self):
return parse_int_vector(self._raw_data.get('color1', "255 255 255"))
@property
def Color2(self):
return parse_int_vector(self._raw_data.get('color2', "255 255 255"))
@property
def material(self):
return self._raw_data.get('material', "particle/particle_smokegrenade")
@property
def ParticleDrawWidth(self):
return parse_source_value(self._raw_data.get('particledrawwidth', 120))
@property
def ParticleSpacingDistance(self):
return parse_source_value(self._raw_data.get('particlespacingdistance', 80))
@property
def DensityRampSpeed(self):
return parse_source_value(self._raw_data.get('densityrampspeed', 1))
@property
def RotationSpeed(self):
return parse_source_value(self._raw_data.get('rotationspeed', 10))
@property
def MovementSpeed(self):
return parse_source_value(self._raw_data.get('movementspeed', 10))
@property
def Density(self):
return parse_source_value(self._raw_data.get('density', 1))
@property
def MaxDrawDistance(self):
return parse_source_value(self._raw_data.get('maxdrawdistance', 0))
class func_dustcloud(BModelParticleSpawner, Targetname):
@property
def Alpha(self):
return parse_source_value(self._raw_data.get('alpha', 30))
@property
def SizeMin(self):
return self._raw_data.get('sizemin', "100")
@property
def SizeMax(self):
return self._raw_data.get('sizemax', "200")
class env_dustpuff(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def scale(self):
return parse_source_value(self._raw_data.get('scale', 8))
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 16))
@property
def color(self):
return parse_int_vector(self._raw_data.get('color', "128 128 128"))
class env_particlescript(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', "models/Ambient_citadel_paths.mdl")
class env_effectscript(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', "models/Effects/teleporttrail.mdl")
@property
def scriptfile(self):
return self._raw_data.get('scriptfile', "scripts/effects/testeffect.txt")
class logic_auto(Base):
icon_sprite = "editor/logic_auto.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def globalstate(self):
return self._raw_data.get('globalstate', None)
class point_viewcontrol(Parentname, Angles, Targetname):
viewport_model = "models/editor/camera.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fov(self):
return parse_source_value(self._raw_data.get('fov', 90))
@property
def fov_rate(self):
return parse_source_value(self._raw_data.get('fov_rate', 1.0))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def targetattachment(self):
return self._raw_data.get('targetattachment', None)
@property
def wait(self):
return parse_source_value(self._raw_data.get('wait', 10))
@property
def moveto(self):
return self._raw_data.get('moveto', None)
@property
def interpolatepositiontoplayer(self):
return self._raw_data.get('interpolatepositiontoplayer', "0")
@property
def speed(self):
return self._raw_data.get('speed', "0")
@property
def acceleration(self):
return self._raw_data.get('acceleration', "500")
@property
def deceleration(self):
return self._raw_data.get('deceleration', "500")
class point_posecontroller(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def PropName(self):
return self._raw_data.get('propname', None)
@property
def PoseParameterName(self):
return self._raw_data.get('poseparametername', None)
@property
def PoseValue(self):
return parse_source_value(self._raw_data.get('posevalue', 0.0))
@property
def InterpolationTime(self):
return parse_source_value(self._raw_data.get('interpolationtime', 0.0))
@property
def InterpolationWrap(self):
return self._raw_data.get('interpolationwrap', "0")
@property
def CycleFrequency(self):
return parse_source_value(self._raw_data.get('cyclefrequency', 0.0))
@property
def FModulationType(self):
return self._raw_data.get('fmodulationtype', "0")
@property
def FModTimeOffset(self):
return parse_source_value(self._raw_data.get('fmodtimeoffset', 0.0))
@property
def FModRate(self):
return parse_source_value(self._raw_data.get('fmodrate', 0.0))
@property
def FModAmplitude(self):
return parse_source_value(self._raw_data.get('fmodamplitude', 0.0))
class logic_compare(Targetname):
icon_sprite = "editor/logic_compare.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def InitialValue(self):
return parse_source_value(self._raw_data.get('initialvalue', None))
@property
def CompareValue(self):
return parse_source_value(self._raw_data.get('comparevalue', None))
class logic_branch(Targetname):
icon_sprite = "editor/logic_branch.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def InitialValue(self):
return parse_source_value(self._raw_data.get('initialvalue', None))
class logic_branch_listener(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Branch01(self):
return self._raw_data.get('branch01', None)
@property
def Branch02(self):
return self._raw_data.get('branch02', None)
@property
def Branch03(self):
return self._raw_data.get('branch03', None)
@property
def Branch04(self):
return self._raw_data.get('branch04', None)
@property
def Branch05(self):
return self._raw_data.get('branch05', None)
@property
def Branch06(self):
return self._raw_data.get('branch06', None)
@property
def Branch07(self):
return self._raw_data.get('branch07', None)
@property
def Branch08(self):
return self._raw_data.get('branch08', None)
@property
def Branch09(self):
return self._raw_data.get('branch09', None)
@property
def Branch10(self):
return self._raw_data.get('branch10', None)
@property
def Branch11(self):
return self._raw_data.get('branch11', None)
@property
def Branch12(self):
return self._raw_data.get('branch12', None)
@property
def Branch13(self):
return self._raw_data.get('branch13', None)
@property
def Branch14(self):
return self._raw_data.get('branch14', None)
@property
def Branch15(self):
return self._raw_data.get('branch15', None)
@property
def Branch16(self):
return self._raw_data.get('branch16', None)
class logic_case(Targetname):
icon_sprite = "editor/logic_case.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Case01(self):
return self._raw_data.get('case01', None)
@property
def Case02(self):
return self._raw_data.get('case02', None)
@property
def Case03(self):
return self._raw_data.get('case03', None)
@property
def Case04(self):
return self._raw_data.get('case04', None)
@property
def Case05(self):
return self._raw_data.get('case05', None)
@property
def Case06(self):
return self._raw_data.get('case06', None)
@property
def Case07(self):
return self._raw_data.get('case07', None)
@property
def Case08(self):
return self._raw_data.get('case08', None)
@property
def Case09(self):
return self._raw_data.get('case09', None)
@property
def Case10(self):
return self._raw_data.get('case10', None)
@property
def Case11(self):
return self._raw_data.get('case11', None)
@property
def Case12(self):
return self._raw_data.get('case12', None)
@property
def Case13(self):
return self._raw_data.get('case13', None)
@property
def Case14(self):
return self._raw_data.get('case14', None)
@property
def Case15(self):
return self._raw_data.get('case15', None)
@property
def Case16(self):
return self._raw_data.get('case16', None)
class logic_multicompare(Targetname):
icon_sprite = "editor/logic_multicompare.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def IntegerValue(self):
return parse_source_value(self._raw_data.get('integervalue', None))
@property
def ShouldComparetoValue(self):
return self._raw_data.get('shouldcomparetovalue', "0")
class logic_relay(EnableDisable, Targetname):
icon_sprite = "editor/logic_relay.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class logic_timer(EnableDisable, Targetname):
icon_sprite = "editor/logic_timer.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def UseRandomTime(self):
return self._raw_data.get('userandomtime', "0")
@property
def LowerRandomBound(self):
return self._raw_data.get('lowerrandombound', None)
@property
def UpperRandomBound(self):
return self._raw_data.get('upperrandombound', None)
@property
def RefireTime(self):
return self._raw_data.get('refiretime', None)
class hammer_updateignorelist(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def IgnoredName01(self):
return self._raw_data.get('ignoredname01', "")
@property
def IgnoredName02(self):
return self._raw_data.get('ignoredname02', "")
@property
def IgnoredName03(self):
return self._raw_data.get('ignoredname03', "")
@property
def IgnoredName04(self):
return self._raw_data.get('ignoredname04', "")
@property
def IgnoredName05(self):
return self._raw_data.get('ignoredname05', "")
@property
def IgnoredName06(self):
return self._raw_data.get('ignoredname06', "")
@property
def IgnoredName07(self):
return self._raw_data.get('ignoredname07', "")
@property
def IgnoredName08(self):
return self._raw_data.get('ignoredname08', "")
@property
def IgnoredName09(self):
return self._raw_data.get('ignoredname09', "")
@property
def IgnoredName10(self):
return self._raw_data.get('ignoredname10', "")
@property
def IgnoredName11(self):
return self._raw_data.get('ignoredname11', "")
@property
def IgnoredName12(self):
return self._raw_data.get('ignoredname12', "")
@property
def IgnoredName13(self):
return self._raw_data.get('ignoredname13', "")
@property
def IgnoredName14(self):
return self._raw_data.get('ignoredname14', "")
@property
def IgnoredName15(self):
return self._raw_data.get('ignoredname15', "")
@property
def IgnoredName16(self):
return self._raw_data.get('ignoredname16', "")
class logic_collision_pair(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def attach1(self):
return self._raw_data.get('attach1', "")
@property
def attach2(self):
return self._raw_data.get('attach2', "")
@property
def startdisabled(self):
return self._raw_data.get('startdisabled', "1")
class env_microphone(Parentname, EnableDisable, Targetname):
icon_sprite = "editor/env_microphone.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def SpeakerName(self):
return self._raw_data.get('speakername', "")
@property
def ListenFilter(self):
return self._raw_data.get('listenfilter', "")
@property
def speaker_dsp_preset(self):
return self._raw_data.get('speaker_dsp_preset', "0")
@property
def Sensitivity(self):
return parse_source_value(self._raw_data.get('sensitivity', 1))
@property
def SmoothFactor(self):
return parse_source_value(self._raw_data.get('smoothfactor', 0))
@property
def MaxRange(self):
return parse_source_value(self._raw_data.get('maxrange', 240))
class math_remap(EnableDisable, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def in1(self):
return parse_source_value(self._raw_data.get('in1', 0))
@property
def in2(self):
return parse_source_value(self._raw_data.get('in2', 1))
@property
def out1(self):
return parse_source_value(self._raw_data.get('out1', None))
@property
def out2(self):
return parse_source_value(self._raw_data.get('out2', None))
class math_colorblend(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def inmin(self):
return parse_source_value(self._raw_data.get('inmin', 0))
@property
def inmax(self):
return parse_source_value(self._raw_data.get('inmax', 1))
@property
def colormin(self):
return parse_int_vector(self._raw_data.get('colormin', "0 0 0"))
@property
def colormax(self):
return parse_int_vector(self._raw_data.get('colormax', "255 255 255"))
class math_counter(EnableDisable, Targetname):
icon_sprite = "editor/math_counter.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def startvalue(self):
return parse_source_value(self._raw_data.get('startvalue', 0))
@property
def min(self):
return parse_source_value(self._raw_data.get('min', 0))
@property
def max(self):
return parse_source_value(self._raw_data.get('max', 0))
class logic_lineto(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def source(self):
return self._raw_data.get('source', None)
@property
def target(self):
return self._raw_data.get('target', None)
class logic_navigation(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None) # Set to none due to bug in BlackMesa base.fgd file
@property
def navprop(self):
return self._raw_data.get('navprop', "Ignore")
class logic_autosave(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def NewLevelUnit(self):
return self._raw_data.get('newlevelunit', "0")
@property
def MinimumHitPoints(self):
return parse_source_value(self._raw_data.get('minimumhitpoints', 0))
@property
def MinHitPointsToCommit(self):
return parse_source_value(self._raw_data.get('minhitpointstocommit', 0))
class logic_active_autosave(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MinimumHitPoints(self):
return parse_source_value(self._raw_data.get('minimumhitpoints', 30))
@property
def TriggerHitPoints(self):
return parse_source_value(self._raw_data.get('triggerhitpoints', 75))
@property
def TimeToTrigget(self):
return parse_source_value(self._raw_data.get('timetotrigget', 0))  # spelling follows the FGD key 'timetotrigget'
@property
def DangerousTime(self):
return parse_source_value(self._raw_data.get('dangeroustime', 10))
class point_template(Targetname):
icon_sprite = "editor/point_template.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Template01(self):
return self._raw_data.get('template01', None)
@property
def Template02(self):
return self._raw_data.get('template02', None)
@property
def Template03(self):
return self._raw_data.get('template03', None)
@property
def Template04(self):
return self._raw_data.get('template04', None)
@property
def Template05(self):
return self._raw_data.get('template05', None)
@property
def Template06(self):
return self._raw_data.get('template06', None)
@property
def Template07(self):
return self._raw_data.get('template07', None)
@property
def Template08(self):
return self._raw_data.get('template08', None)
@property
def Template09(self):
return self._raw_data.get('template09', None)
@property
def Template10(self):
return self._raw_data.get('template10', None)
@property
def Template11(self):
return self._raw_data.get('template11', None)
@property
def Template12(self):
return self._raw_data.get('template12', None)
@property
def Template13(self):
return self._raw_data.get('template13', None)
@property
def Template14(self):
return self._raw_data.get('template14', None)
@property
def Template15(self):
return self._raw_data.get('template15', None)
@property
def Template16(self):
return self._raw_data.get('template16', None)
class env_entity_maker(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def EntityTemplate(self):
return self._raw_data.get('entitytemplate', "")
@property
def PostSpawnSpeed(self):
return parse_source_value(self._raw_data.get('postspawnspeed', 0))
@property
def PostSpawnDirection(self):
return parse_float_vector(self._raw_data.get('postspawndirection', "0 0 0"))
@property
def PostSpawnDirectionVariance(self):
return parse_source_value(self._raw_data.get('postspawndirectionvariance', 0.15))
@property
def PostSpawnInheritAngles(self):
return self._raw_data.get('postspawninheritangles', "0")
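# The filter_* entities below all derive from BaseFilter; its Negated default
# ("Allow entities that match criteria") mirrors the FGD choice label rather than a boolean.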
class BaseFilter(Targetname):
@property
def Negated(self):
return self._raw_data.get('negated', "Allow entities that match criteria")
class filter_multi(BaseFilter):
icon_sprite = "editor/filter_multiple.vmt"
@property
def filtertype(self):
return self._raw_data.get('filtertype', "0")
@property
def Filter01(self):
return self._raw_data.get('filter01', None)
@property
def Filter02(self):
return self._raw_data.get('filter02', None)
@property
def Filter03(self):
return self._raw_data.get('filter03', None)
@property
def Filter04(self):
return self._raw_data.get('filter04', None)
@property
def Filter05(self):
return self._raw_data.get('filter05', None)
@property
def Filter06(self):
return self._raw_data.get('filter06', None)
@property
def Filter07(self):
return self._raw_data.get('filter07', None)
@property
def Filter08(self):
return self._raw_data.get('filter08', None)
@property
def Filter09(self):
return self._raw_data.get('filter09', None)
@property
def Filter10(self):
return self._raw_data.get('filter10', None)
class filter_activator_name(BaseFilter):
icon_sprite = "editor/filter_name.vmt"
@property
def filtername(self):
return self._raw_data.get('filtername', None)
class filter_activator_model(BaseFilter):
icon_sprite = "editor/filter_name.vmt"
@property
def model(self):
return self._raw_data.get('model', None)
class filter_activator_context(BaseFilter):
icon_sprite = "editor/filter_name.vmt"
@property
def ResponseContext(self):
return self._raw_data.get('responsecontext', None)
class filter_activator_class(BaseFilter):
icon_sprite = "editor/filter_class.vmt"
@property
def filterclass(self):
return self._raw_data.get('filterclass', None)
class filter_activator_mass_greater(BaseFilter):
icon_sprite = "editor/filter_class.vmt"
@property
def filtermass(self):
return parse_source_value(self._raw_data.get('filtermass', None))
class filter_damage_type(BaseFilter):
@property
def damagetype(self):
return self._raw_data.get('damagetype', "64")
class filter_enemy(BaseFilter):
icon_sprite = "editor/filter_class.vmt"
@property
def filtername(self):
return self._raw_data.get('filtername', None)
@property
def filter_radius(self):
return parse_source_value(self._raw_data.get('filter_radius', 0))
@property
def filter_outer_radius(self):
return parse_source_value(self._raw_data.get('filter_outer_radius', 0))
@property
def filter_max_per_enemy(self):
return parse_source_value(self._raw_data.get('filter_max_per_enemy', 0))
class point_anglesensor(Parentname, EnableDisable, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def lookatname(self):
return self._raw_data.get('lookatname', None)
@property
def duration(self):
return parse_source_value(self._raw_data.get('duration', None))
@property
def tolerance(self):
return parse_source_value(self._raw_data.get('tolerance', None))
class point_angularvelocitysensor(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def threshold(self):
return parse_source_value(self._raw_data.get('threshold', 0))
@property
def fireinterval(self):
return parse_source_value(self._raw_data.get('fireinterval', 0.2))
@property
def axis(self):
return self._raw_data.get('axis', None)
@property
def usehelper(self):
return self._raw_data.get('usehelper', "0")
class point_velocitysensor(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def axis(self):
return self._raw_data.get('axis', None)
@property
def enabled(self):
return self._raw_data.get('enabled', "1")
class point_proximity_sensor(Parentname, Angles, EnableDisable, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
class point_teleport(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
class point_hurt(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def DamageTarget(self):
return self._raw_data.get('damagetarget', "")
@property
def DamageRadius(self):
return parse_source_value(self._raw_data.get('damageradius', 256))
@property
def Damage(self):
return parse_source_value(self._raw_data.get('damage', 5))
@property
def DamageDelay(self):
return parse_source_value(self._raw_data.get('damagedelay', 1))
@property
def DamageType(self):
return self._raw_data.get('damagetype', "0")
class point_playermoveconstraint(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 256))
@property
def width(self):
return parse_source_value(self._raw_data.get('width', 75.0))
@property
def speedfactor(self):
return parse_source_value(self._raw_data.get('speedfactor', 0.15))
class point_push(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def enabled(self):
return self._raw_data.get('enabled', "1")
@property
def magnitude(self):
return parse_source_value(self._raw_data.get('magnitude', 100))
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 128))
@property
def inner_radius(self):
return parse_source_value(self._raw_data.get('inner_radius', 0))
@property
def influence_cone(self):
return parse_source_value(self._raw_data.get('influence_cone', 0))
class func_physbox(BreakableBrush, Origin, RenderFields):
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
@property
def Damagetype(self):
return self._raw_data.get('damagetype', "0")
@property
def massScale(self):
return parse_source_value(self._raw_data.get('massscale', 0))
@property
def overridescript(self):
return self._raw_data.get('overridescript', "")
@property
def damagetoenablemotion(self):
return parse_source_value(self._raw_data.get('damagetoenablemotion', 0))
@property
def forcetoenablemotion(self):
return parse_source_value(self._raw_data.get('forcetoenablemotion', 0))
@property
def preferredcarryangles(self):
return parse_float_vector(self._raw_data.get('preferredcarryangles', "0 0 0"))
@property
def notsolid(self):
return self._raw_data.get('notsolid', "0")
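# TwoObjectPhysics is the shared base for the phys_* constraint entities that join the
# objects named in attach1/attach2; a forcelimit/torquelimit of 0 presumably means the
# constraint is unbreakable (assumption based on the usual FGD convention).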
class TwoObjectPhysics(Targetname):
@property
def attach1(self):
return self._raw_data.get('attach1', "")
@property
def attach2(self):
return self._raw_data.get('attach2', "")
@property
def constraintsystem(self):
return self._raw_data.get('constraintsystem', "")
@property
def forcelimit(self):
return parse_source_value(self._raw_data.get('forcelimit', 0))
@property
def torquelimit(self):
return parse_source_value(self._raw_data.get('torquelimit', 0))
@property
def breaksound(self):
return self._raw_data.get('breaksound', "")
@property
def teleportfollowdistance(self):
return parse_source_value(self._raw_data.get('teleportfollowdistance', 0))
class phys_constraintsystem(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def additionaliterations(self):
return parse_source_value(self._raw_data.get('additionaliterations', 0))
class phys_keepupright(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def attach1(self):
return self._raw_data.get('attach1', "")
@property
def angularlimit(self):
return parse_source_value(self._raw_data.get('angularlimit', 15))
class physics_cannister(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', "models/fire_equipment/w_weldtank.mdl")
@property
def expdamage(self):
return self._raw_data.get('expdamage', "200.0")
@property
def expradius(self):
return self._raw_data.get('expradius', "250.0")
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 25))
@property
def thrust(self):
return self._raw_data.get('thrust', "3000.0")
@property
def fuel(self):
return self._raw_data.get('fuel', "12.0")
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 128))
@property
def gassound(self):
return self._raw_data.get('gassound', "ambient/objects/cannister_loop.wav")
class info_constraint_anchor(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def massScale(self):
return parse_source_value(self._raw_data.get('massscale', 1))
class info_mass_center(Base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', "")
class phys_spring(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def attach1(self):
return self._raw_data.get('attach1', "")
@property
def attach2(self):
return self._raw_data.get('attach2', "")
@property
def springaxis(self):
return self._raw_data.get('springaxis', "")
@property
def length(self):
return self._raw_data.get('length', "0")
@property
def constant(self):
return self._raw_data.get('constant', "50")
@property
def damping(self):
return self._raw_data.get('damping', "2.0")
@property
def relativedamping(self):
return self._raw_data.get('relativedamping', "0.1")
@property
def breaklength(self):
return self._raw_data.get('breaklength', "0")
class phys_hinge(TwoObjectPhysics):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def hingefriction(self):
return parse_source_value(self._raw_data.get('hingefriction', 0))
@property
def hingeaxis(self):
return self._raw_data.get('hingeaxis', None)
@property
def SystemLoadScale(self):
return parse_source_value(self._raw_data.get('systemloadscale', 1))
@property
def minSoundThreshold(self):
return parse_source_value(self._raw_data.get('minsoundthreshold', 6))
@property
def maxSoundThreshold(self):
return parse_source_value(self._raw_data.get('maxsoundthreshold', 80))
@property
def slidesoundfwd(self):
return self._raw_data.get('slidesoundfwd', "")
@property
def slidesoundback(self):
return self._raw_data.get('slidesoundback', "")
@property
def reversalsoundthresholdSmall(self):
return parse_source_value(self._raw_data.get('reversalsoundthresholdsmall', 0))
@property
def reversalsoundthresholdMedium(self):
return parse_source_value(self._raw_data.get('reversalsoundthresholdmedium', 0))
@property
def reversalsoundthresholdLarge(self):
return parse_source_value(self._raw_data.get('reversalsoundthresholdlarge', 0))
@property
def reversalsoundSmall(self):
return self._raw_data.get('reversalsoundsmall', "")
@property
def reversalsoundMedium(self):
return self._raw_data.get('reversalsoundmedium', "")
@property
def reversalsoundLarge(self):
return self._raw_data.get('reversalsoundlarge', "")
class phys_ballsocket(TwoObjectPhysics):
icon_sprite = "editor/phys_ballsocket.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class phys_constraint(TwoObjectPhysics):
model_ = "models/editor/axis_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class phys_pulleyconstraint(TwoObjectPhysics):
model_ = "models/editor/axis_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def addlength(self):
return parse_source_value(self._raw_data.get('addlength', 0))
@property
def gearratio(self):
return parse_source_value(self._raw_data.get('gearratio', 1))
@property
def position2(self):
return self._raw_data.get('position2', None)
class phys_slideconstraint(TwoObjectPhysics):
model_ = "models/editor/axis_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def slideaxis(self):
return self._raw_data.get('slideaxis', None)
@property
def slidefriction(self):
return parse_source_value(self._raw_data.get('slidefriction', 0))
@property
def SystemLoadScale(self):
return parse_source_value(self._raw_data.get('systemloadscale', 1))
@property
def minSoundThreshold(self):
return parse_source_value(self._raw_data.get('minsoundthreshold', 6))
@property
def maxSoundThreshold(self):
return parse_source_value(self._raw_data.get('maxsoundthreshold', 80))
@property
def slidesoundfwd(self):
return self._raw_data.get('slidesoundfwd', "")
@property
def slidesoundback(self):
return self._raw_data.get('slidesoundback', "")
@property
def reversalsoundthresholdSmall(self):
return parse_source_value(self._raw_data.get('reversalsoundthresholdsmall', 0))
@property
def reversalsoundthresholdMedium(self):
return parse_source_value(self._raw_data.get('reversalsoundthresholdmedium', 0))
@property
def reversalsoundthresholdLarge(self):
return parse_source_value(self._raw_data.get('reversalsoundthresholdlarge', 0))
@property
def reversalsoundSmall(self):
return self._raw_data.get('reversalsoundsmall', "")
@property
def reversalsoundMedium(self):
return self._raw_data.get('reversalsoundmedium', "")
@property
def reversalsoundLarge(self):
return self._raw_data.get('reversalsoundlarge', "")
class phys_lengthconstraint(TwoObjectPhysics):
model_ = "models/editor/axis_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def addlength(self):
return parse_source_value(self._raw_data.get('addlength', 0))
@property
def minlength(self):
return parse_source_value(self._raw_data.get('minlength', 0))
@property
def attachpoint(self):
return self._raw_data.get('attachpoint', None) # Set to none due to bug in BlackMesa base.fgd file
class phys_ragdollconstraint(TwoObjectPhysics):
model_ = "models/editor/axis_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def xmin(self):
return parse_source_value(self._raw_data.get('xmin', -90))
@property
def xmax(self):
return parse_source_value(self._raw_data.get('xmax', 90))
@property
def ymin(self):
return parse_source_value(self._raw_data.get('ymin', 0))
@property
def ymax(self):
return parse_source_value(self._raw_data.get('ymax', 0))
@property
def zmin(self):
return parse_source_value(self._raw_data.get('zmin', 0))
@property
def zmax(self):
return parse_source_value(self._raw_data.get('zmax', 0))
@property
def xfriction(self):
return parse_source_value(self._raw_data.get('xfriction', 0))
@property
def yfriction(self):
return parse_source_value(self._raw_data.get('yfriction', 0))
@property
def zfriction(self):
return parse_source_value(self._raw_data.get('zfriction', 0))
class phys_convert(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def swapmodel(self):
return self._raw_data.get('swapmodel', None)
@property
def massoverride(self):
return parse_source_value(self._raw_data.get('massoverride', 0))
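# ForceController is the base for phys_thruster and phys_torque, which push or spin the
# object named in attach1; forcetime "0" presumably means the force is applied continuously (assumption).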
class ForceController(Targetname):
@property
def attach1(self):
return self._raw_data.get('attach1', "")
@property
def forcetime(self):
return self._raw_data.get('forcetime', "0")
class phys_thruster(Angles, ForceController):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def force(self):
return self._raw_data.get('force', "0")
class phys_torque(ForceController):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def force(self):
return self._raw_data.get('force', "0")
@property
def axis(self):
return self._raw_data.get('axis', "")
class phys_motor(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def speed(self):
return self._raw_data.get('speed', "0")
@property
def spinup(self):
return self._raw_data.get('spinup', "1")
@property
def inertiafactor(self):
return parse_source_value(self._raw_data.get('inertiafactor', 1.0))
@property
def axis(self):
return self._raw_data.get('axis', "")
@property
def attach1(self):
return self._raw_data.get('attach1', "")
class phys_magnet(Studiomodel, Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def forcelimit(self):
return parse_source_value(self._raw_data.get('forcelimit', 0))
@property
def torquelimit(self):
return parse_source_value(self._raw_data.get('torquelimit', 0))
@property
def massScale(self):
return parse_source_value(self._raw_data.get('massscale', 0))
@property
def overridescript(self):
return self._raw_data.get('overridescript', "")
@property
def maxobjects(self):
return parse_source_value(self._raw_data.get('maxobjects', 0))
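# The prop_*_base classes below only collect shared keyvalues; the concrete prop_detail,
# prop_static, prop_dynamic and prop_physics entities further down derive from them.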
class prop_detail_base(Base):
@property
def model(self):
return self._raw_data.get('model', None)
class prop_static_base(Angles, SystemLevelChoice):
@property
def model(self):
return self._raw_data.get('model', None)
@property
def skin(self):
return parse_source_value(self._raw_data.get('skin', 0))
@property
def solid(self):
return self._raw_data.get('solid', "6")
@property
def disableshadows(self):
return self._raw_data.get('disableshadows', "0")
@property
def fademindist(self):
return parse_source_value(self._raw_data.get('fademindist', -1))
@property
def fademaxdist(self):
return parse_source_value(self._raw_data.get('fademaxdist', 0))
@property
def fadescale(self):
return parse_source_value(self._raw_data.get('fadescale', 1))
@property
def lightingorigin(self):
return self._raw_data.get('lightingorigin', "")
@property
def disablevertexlighting(self):
return self._raw_data.get('disablevertexlighting', "1")
@property
def disableselfshadowing(self):
return self._raw_data.get('disableselfshadowing', "1")
@property
def ignorenormals(self):
return self._raw_data.get('ignorenormals', "0")
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 255))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
class prop_dynamic_base(Parentname, Glow, Studiomodel, Angles, BreakableProp, Global, RenderFields):
@property
def solid(self):
return self._raw_data.get('solid', "6")
@property
def DefaultAnim(self):
return self._raw_data.get('defaultanim', "")
@property
def RandomAnimation(self):
return self._raw_data.get('randomanimation', "0")
@property
def MinAnimTime(self):
return parse_source_value(self._raw_data.get('minanimtime', 5))
@property
def MaxAnimTime(self):
return parse_source_value(self._raw_data.get('maxanimtime', 10))
@property
def SetBodyGroup(self):
return parse_source_value(self._raw_data.get('setbodygroup', 0))
@property
def LagCompensate(self):
return self._raw_data.get('lagcompensate', "0")
@property
def glowbackfacemult(self):
return parse_source_value(self._raw_data.get('glowbackfacemult', 1.0))
@property
def lightingorigin(self):
return self._raw_data.get('lightingorigin', "")
@property
def updatechildren(self):
return self._raw_data.get('updatechildren', "0")
class prop_detail(prop_detail_base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class prop_static(prop_static_base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class prop_dynamic(prop_dynamic_base, EnableDisable):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class prop_dynamic_override(prop_dynamic_base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 0))
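# BasePropPhysics carries the keyvalues shared by the prop_physics variants defined below.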
class BasePropPhysics(Glow, Studiomodel, Angles, BreakableProp, Global, SystemLevelChoice):
@property
def minhealthdmg(self):
return parse_source_value(self._raw_data.get('minhealthdmg', 0))
@property
def shadowcastdist(self):
return parse_source_value(self._raw_data.get('shadowcastdist', 0))
@property
def physdamagescale(self):
return parse_source_value(self._raw_data.get('physdamagescale', 0.1))
@property
def Damagetype(self):
return self._raw_data.get('damagetype', "0")
@property
def nodamageforces(self):
return self._raw_data.get('nodamageforces', "0")
@property
def inertiaScale(self):
return parse_source_value(self._raw_data.get('inertiascale', 1.0))
@property
def massScale(self):
return parse_source_value(self._raw_data.get('massscale', 0))
@property
def overridescript(self):
return self._raw_data.get('overridescript', "")
@property
def damagetoenablemotion(self):
return parse_source_value(self._raw_data.get('damagetoenablemotion', 0))
@property
def forcetoenablemotion(self):
return parse_source_value(self._raw_data.get('forcetoenablemotion', 0))
@property
def puntsound(self):
return self._raw_data.get('puntsound', None)
class prop_physics_override(BasePropPhysics, BaseFadeProp):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 0))
class prop_physics(RenderFields, BasePropPhysics):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def BreakableType(self):
return self._raw_data.get('breakabletype', "0")
class prop_physics_multiplayer(prop_physics):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def physicsmode(self):
return self._raw_data.get('physicsmode', "0")
class prop_ragdoll(Studiomodel, Angles, SystemLevelChoice, EnableDisable, BaseFadeProp, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def angleOverride(self):
return self._raw_data.get('angleoverride', "")
class prop_dynamic_ornament(prop_dynamic_base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def InitialOwner(self):
return self._raw_data.get('initialowner', None)
class func_areaportal(Targetname):
@property
def target(self):
return self._raw_data.get('target', None)
@property
def StartOpen(self):
return self._raw_data.get('startopen', "1")
@property
def PortalVersion(self):
return parse_source_value(self._raw_data.get('portalversion', 1))
class func_occluder(Targetname):
@property
def StartActive(self):
return self._raw_data.get('startactive', "1")
class func_breakable(RenderFields, BreakableBrush, Origin):
@property
def minhealthdmg(self):
return parse_source_value(self._raw_data.get('minhealthdmg', 0))
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
@property
def physdamagescale(self):
return parse_source_value(self._raw_data.get('physdamagescale', 1.0))
@property
def BreakableType(self):
return self._raw_data.get('breakabletype', "0")
class func_breakable_surf(RenderFields, BreakableBrush):
@property
def fragility(self):
return parse_source_value(self._raw_data.get('fragility', 100))
@property
def surfacetype(self):
return self._raw_data.get('surfacetype', "0")
class func_conveyor(RenderFields, Parentname, Shadow, Targetname):
@property
def movedir(self):
return parse_float_vector(self._raw_data.get('movedir', "0 0 0"))
@property
def speed(self):
return self._raw_data.get('speed', "100")
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class func_detail(Base):
pass
class func_viscluster(Base):
pass
class func_illusionary(Parentname, Shadow, Origin, RenderFields, Targetname):
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class func_precipitation(Parentname, Targetname):
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 100))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "100 100 100"))
@property
def preciptype(self):
return self._raw_data.get('preciptype', "0")
@property
def minSpeed(self):
return parse_source_value(self._raw_data.get('minspeed', 25))
@property
def maxSpeed(self):
return parse_source_value(self._raw_data.get('maxspeed', 35))
class func_precipitation_blocker(Parentname, Targetname):
pass
class func_detail_blocker(Parentname, Targetname):
pass
class func_wall_toggle(func_wall):
pass
class func_guntarget(RenderFields, Parentname, Global, Targetname):
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 100))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 0))
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class func_fish_pool(Base):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', "models/Junkola.mdl")
@property
def fish_count(self):
return parse_source_value(self._raw_data.get('fish_count', 10))
@property
def max_range(self):
return parse_source_value(self._raw_data.get('max_range', 150))
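# PlatSounds and Trackchange bundle the sound and movement keyvalues shared by the
# func_trackchange / func_trackautochange platform entities below.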
class PlatSounds(Base):
@property
def movesnd(self):
return self._raw_data.get('movesnd', "0")
@property
def stopsnd(self):
return self._raw_data.get('stopsnd', "0")
@property
def volume(self):
return self._raw_data.get('volume', "0.85")
class Trackchange(Parentname, PlatSounds, Global, RenderFields, Targetname):
@property
def height(self):
return parse_source_value(self._raw_data.get('height', 0))
@property
def rotation(self):
return parse_source_value(self._raw_data.get('rotation', 0))
@property
def train(self):
return self._raw_data.get('train', None)
@property
def toptrack(self):
return self._raw_data.get('toptrack', None)
@property
def bottomtrack(self):
return self._raw_data.get('bottomtrack', None)
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 0))
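# BaseTrain carries the keyvalues shared by func_tracktrain and func_tanktrain.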
class BaseTrain(Parentname, Shadow, Global, Origin, RenderFields, Targetname):
@property
def target(self):
return self._raw_data.get('target', "")
@property
def startspeed(self):
return parse_source_value(self._raw_data.get('startspeed', 100))
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 0))
@property
def velocitytype(self):
return self._raw_data.get('velocitytype', "0")
@property
def orientationtype(self):
return self._raw_data.get('orientationtype', "1")
@property
def wheels(self):
return parse_source_value(self._raw_data.get('wheels', 50))
@property
def height(self):
return parse_source_value(self._raw_data.get('height', 4))
@property
def bank(self):
return self._raw_data.get('bank', "0")
@property
def dmg(self):
return parse_source_value(self._raw_data.get('dmg', 0))
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
@property
def MoveSound(self):
return self._raw_data.get('movesound', "")
@property
def MovePingSound(self):
return self._raw_data.get('movepingsound', "")
@property
def StartSound(self):
return self._raw_data.get('startsound', "")
@property
def StopSound(self):
return self._raw_data.get('stopsound', "")
@property
def volume(self):
return parse_source_value(self._raw_data.get('volume', 10))
@property
def MoveSoundMinPitch(self):
return parse_source_value(self._raw_data.get('movesoundminpitch', 60))
@property
def MoveSoundMaxPitch(self):
return parse_source_value(self._raw_data.get('movesoundmaxpitch', 200))
@property
def MoveSoundMinTime(self):
return parse_source_value(self._raw_data.get('movesoundmintime', 0))
@property
def MoveSoundMaxTime(self):
return parse_source_value(self._raw_data.get('movesoundmaxtime', 0))
class func_trackautochange(Trackchange):
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class func_trackchange(Trackchange):
@property
def _minlight(self):
return self._raw_data.get('_minlight', None)
class func_tracktrain(BaseTrain):
pass
class func_tanktrain(BaseTrain):
@property
def health(self):
return parse_source_value(self._raw_data.get('health', 100))
class func_traincontrols(Parentname, Global):
@property
def target(self):
return self._raw_data.get('target', None)
class tanktrain_aitarget(Targetname):
icon_sprite = "editor/tanktrain_aitarget.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def newtarget(self):
return self._raw_data.get('newtarget', None)
class tanktrain_ai(Targetname):
icon_sprite = "editor/tanktrain_ai.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def startsound(self):
return self._raw_data.get('startsound', "vehicles/diesel_start1.wav")
@property
def enginesound(self):
return self._raw_data.get('enginesound', "vehicles/diesel_turbo_loop1.wav")
@property
def movementsound(self):
return self._raw_data.get('movementsound', "vehicles/tank_treads_loop1.wav")
class path_track(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def altpath(self):
return self._raw_data.get('altpath', None)
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 0))
@property
def radius(self):
return parse_source_value(self._raw_data.get('radius', 0))
@property
def orientationtype(self):
return self._raw_data.get('orientationtype', "1")
class test_traceline(Angles):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
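# The trigger_* brush entities below derive from the Trigger base class, presumably defined
# earlier in the file, plus the usual Targetname/EnableDisable mixins.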
class trigger_autosave(Targetname):
@property
def master(self):
return self._raw_data.get('master', None)
@property
def NewLevelUnit(self):
return self._raw_data.get('newlevelunit', "0")
@property
def DangerousTimer(self):
return parse_source_value(self._raw_data.get('dangeroustimer', 0))
@property
def MinimumHitPoints(self):
return parse_source_value(self._raw_data.get('minimumhitpoints', 0))
class trigger_changelevel(EnableDisable):
@property
def targetname(self):
return self._raw_data.get('targetname', None)
@property
def map(self):
return self._raw_data.get('map', None)
@property
def landmark(self):
return self._raw_data.get('landmark', None)
class trigger_gravity(Trigger):
@property
def gravity(self):
return parse_source_value(self._raw_data.get('gravity', 1))
class trigger_playermovement(Trigger):
pass
class trigger_soundscape(Trigger):
@property
def soundscape(self):
return self._raw_data.get('soundscape', None)
class trigger_hurt(Trigger):
@property
def master(self):
return self._raw_data.get('master', None)
@property
def damage(self):
return parse_source_value(self._raw_data.get('damage', 10))
@property
def damagecap(self):
return parse_source_value(self._raw_data.get('damagecap', 20))
@property
def damagetype(self):
return self._raw_data.get('damagetype', "0")
@property
def damagemodel(self):
return self._raw_data.get('damagemodel', "0")
@property
def nodmgforce(self):
return self._raw_data.get('nodmgforce', "0")
@property
def damageforce(self):
value = self._raw_data.get('damageforce', None)
# Guard the missing-key case: every other vector property in this module passes a string
# default to the parse_*_vector helpers, so None may not be handled there.
return parse_float_vector(value) if value is not None else None
@property
def thinkalways(self):
return self._raw_data.get('thinkalways', "0")
class trigger_remove(Trigger):
pass
class trigger_multiple(Trigger):
@property
def wait(self):
return parse_source_value(self._raw_data.get('wait', 1))
@property
def entireteam(self):
return self._raw_data.get('entireteam', "0")
@property
def allowincap(self):
return self._raw_data.get('allowincap', "0")
@property
def allowghost(self):
return self._raw_data.get('allowghost', "0")
class trigger_once(TriggerOnce):
pass
class trigger_look(Trigger):
@property
def target(self):
return self._raw_data.get('target', None)
@property
def LookTime(self):
return self._raw_data.get('looktime', "0.5")
@property
def FieldOfView(self):
return self._raw_data.get('fieldofview', "0.9")
@property
def Timeout(self):
return parse_source_value(self._raw_data.get('timeout', 0))
class trigger_push(Trigger):
@property
def pushdir(self):
return parse_float_vector(self._raw_data.get('pushdir', "0 0 0"))
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 40))
@property
def alternateticksfix(self):
return parse_source_value(self._raw_data.get('alternateticksfix', 0))
@property
def triggeronstarttouch(self):
return self._raw_data.get('triggeronstarttouch', "0")
class trigger_wind(Angles, Trigger):
@property
def Speed(self):
return parse_source_value(self._raw_data.get('speed', 200))
@property
def SpeedNoise(self):
return parse_source_value(self._raw_data.get('speednoise', 0))
@property
def DirectionNoise(self):
return parse_source_value(self._raw_data.get('directionnoise', 10))
@property
def HoldTime(self):
return parse_source_value(self._raw_data.get('holdtime', 0))
@property
def HoldNoise(self):
return parse_source_value(self._raw_data.get('holdnoise', 0))
class trigger_impact(Angles, Origin, Targetname):
@property
def Magnitude(self):
return parse_source_value(self._raw_data.get('magnitude', 200))
@property
def noise(self):
return parse_source_value(self._raw_data.get('noise', 0.1))
@property
def viewkick(self):
return parse_source_value(self._raw_data.get('viewkick', 0.05))
class trigger_proximity(Trigger):
@property
def measuretarget(self):
return self._raw_data.get('measuretarget', None)
@property
def radius(self):
return self._raw_data.get('radius', "256")
class trigger_teleport(Trigger):
@property
def target(self):
return self._raw_data.get('target', None)
@property
def landmark(self):
return self._raw_data.get('landmark', None)
class trigger_transition(Targetname):
pass
class trigger_serverragdoll(Targetname):
pass
class ai_speechfilter(ResponseContext, EnableDisable, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def subject(self):
return self._raw_data.get('subject', "")
@property
def IdleModifier(self):
return parse_source_value(self._raw_data.get('idlemodifier', 1.0))
@property
def NeverSayHello(self):
return self._raw_data.get('neversayhello', "0")
class water_lod_control(Targetname):
icon_sprite = "editor/waterlodcontrol.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def cheapwaterstartdistance(self):
return parse_source_value(self._raw_data.get('cheapwaterstartdistance', 1000))
@property
def cheapwaterenddistance(self):
return parse_source_value(self._raw_data.get('cheapwaterenddistance', 2000))
class info_camera_link(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def PointCamera(self):
return self._raw_data.get('pointcamera', None)
class logic_measure_movement(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MeasureTarget(self):
return self._raw_data.get('measuretarget', "")
@property
def MeasureReference(self):
return self._raw_data.get('measurereference', "")
@property
def Target(self):
return self._raw_data.get('target', "")
@property
def TargetReference(self):
return self._raw_data.get('targetreference', "")
@property
def TargetScale(self):
return parse_source_value(self._raw_data.get('targetscale', 1))
@property
def MeasureType(self):
return self._raw_data.get('measuretype', "0")
class npc_furniture(Parentname, BaseNPC):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', None)
class env_credits(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class material_modify_control(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def materialName(self):
return self._raw_data.get('materialname', None)
@property
def materialVar(self):
return self._raw_data.get('materialvar', None)
class point_devshot_camera(Angles):
viewport_model = "models/editor/camera.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def cameraname(self):
return self._raw_data.get('cameraname', "")
@property
def FOV(self):
return parse_source_value(self._raw_data.get('fov', 75))
class logic_playerproxy(DamageFilter, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_spritetrail(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def lifetime(self):
return parse_source_value(self._raw_data.get('lifetime', 0.5))
@property
def startwidth(self):
return parse_source_value(self._raw_data.get('startwidth', 8.0))
@property
def endwidth(self):
return parse_source_value(self._raw_data.get('endwidth', 1.0))
@property
def spritename(self):
return self._raw_data.get('spritename', "sprites/bluelaser1.vmt")
@property
def renderamt(self):
return parse_source_value(self._raw_data.get('renderamt', 255))
@property
def rendercolor(self):
return parse_int_vector(self._raw_data.get('rendercolor', "255 255 255"))
@property
def rendermode(self):
return self._raw_data.get('rendermode', "5")
class env_projectedtexture(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def target(self):
return self._raw_data.get('target', None)
@property
def lightfov(self):
return parse_source_value(self._raw_data.get('lightfov', 90.0))
@property
def nearz(self):
return parse_source_value(self._raw_data.get('nearz', 4.0))
@property
def farz(self):
return parse_source_value(self._raw_data.get('farz', 750.0))
@property
def enableshadows(self):
return self._raw_data.get('enableshadows', "0")
@property
def shadowquality(self):
return self._raw_data.get('shadowquality', "1")
@property
def lightonlytarget(self):
return self._raw_data.get('lightonlytarget', "0")
@property
def lightworld(self):
return self._raw_data.get('lightworld', "1")
@property
def lightcolor(self):
return parse_int_vector(self._raw_data.get('lightcolor', "255 255 255 200"))
@property
def cameraspace(self):
return parse_source_value(self._raw_data.get('cameraspace', 0))
class func_reflective_glass(func_brush):
pass
class env_particle_performance_monitor(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class npc_puppet(Studiomodel, Parentname, BaseNPC):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def animationtarget(self):
return self._raw_data.get('animationtarget', "")
@property
def attachmentname(self):
return self._raw_data.get('attachmentname', "")
class point_gamestats_counter(Targetname, EnableDisable, Origin):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Name(self):
return self._raw_data.get('name', None)
class func_instance(Angles):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def targetname(self):
return self._raw_data.get('targetname', None)
@property
def spawnpositionname(self):
return self._raw_data.get('spawnpositionname', None)
@property
def file(self):
return self._raw_data.get('file', None)
@property
def fixup_style(self):
return self._raw_data.get('fixup_style', "0")
@property
def propagate_fixup(self):
return self._raw_data.get('propagate_fixup', "0")
@property
def replace01(self):
return self._raw_data.get('replace01', None)
@property
def replace02(self):
return self._raw_data.get('replace02', None)
@property
def replace03(self):
return self._raw_data.get('replace03', None)
@property
def replace04(self):
return self._raw_data.get('replace04', None)
@property
def replace05(self):
return self._raw_data.get('replace05', None)
@property
def replace06(self):
return self._raw_data.get('replace06', None)
@property
def replace07(self):
return self._raw_data.get('replace07', None)
@property
def replace08(self):
return self._raw_data.get('replace08', None)
@property
def replace09(self):
return self._raw_data.get('replace09', None)
@property
def replace10(self):
return self._raw_data.get('replace10', None)
class func_instance_parms(Base):
icon_sprite = "editor/func_instance_parms.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def parm1(self):
return self._raw_data.get('parm1', None)
@property
def parm2(self):
return self._raw_data.get('parm2', None)
@property
def parm3(self):
return self._raw_data.get('parm3', None)
@property
def parm4(self):
return self._raw_data.get('parm4', None)
@property
def parm5(self):
return self._raw_data.get('parm5', None)
@property
def parm6(self):
return self._raw_data.get('parm6', None)
@property
def parm7(self):
return self._raw_data.get('parm7', None)
@property
def parm8(self):
return self._raw_data.get('parm8', None)
@property
def parm9(self):
return self._raw_data.get('parm9', None)
@property
def parm10(self):
return self._raw_data.get('parm10', None)
class env_instructor_hint(Targetname):
icon_sprite = "editor/env_instructor_hint.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def hint_target(self):
return self._raw_data.get('hint_target', None)
@property
def hint_name(self):
return self._raw_data.get('hint_name', None)
@property
def hint_static(self):
return self._raw_data.get('hint_static', "0")
@property
def hint_allow_nodraw_target(self):
return self._raw_data.get('hint_allow_nodraw_target', "1")
@property
def hint_caption(self):
return self._raw_data.get('hint_caption', None)
@property
def hint_color(self):
return parse_int_vector(self._raw_data.get('hint_color', "255 255 255"))
@property
def hint_forcecaption(self):
return self._raw_data.get('hint_forcecaption', "0")
@property
def hint_icon_onscreen(self):
return self._raw_data.get('hint_icon_onscreen', "icon_tip")
@property
def hint_icon_offscreen(self):
return self._raw_data.get('hint_icon_offscreen', "icon_tip")
@property
def hint_nooffscreen(self):
return self._raw_data.get('hint_nooffscreen', "0")
@property
def hint_binding(self):
return self._raw_data.get('hint_binding', None)
@property
def hint_icon_offset(self):
return parse_source_value(self._raw_data.get('hint_icon_offset', 0))
@property
def hint_pulseoption(self):
return self._raw_data.get('hint_pulseoption', "0")
@property
def hint_alphaoption(self):
return self._raw_data.get('hint_alphaoption', "0")
@property
def hint_shakeoption(self):
return self._raw_data.get('hint_shakeoption', "0")
@property
def hint_timeout(self):
return parse_source_value(self._raw_data.get('hint_timeout', 0))
@property
def hint_display_limit(self):
return parse_source_value(self._raw_data.get('hint_display_limit', 0))
@property
def hint_range(self):
return parse_source_value(self._raw_data.get('hint_range', 0))
@property
def hint_instance_type(self):
return self._raw_data.get('hint_instance_type', "2")
@property
def hint_auto_start(self):
return self._raw_data.get('hint_auto_start', "1")
@property
def hint_suppress_rest(self):
return self._raw_data.get('hint_suppress_rest', "0")
class info_target_instructor_hint(Parentname, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class logic_script(Targetname):
icon_sprite = "editor/logic_script.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Group00(self):
return self._raw_data.get('group00', None)
@property
def Group01(self):
return self._raw_data.get('group01', None)
@property
def Group02(self):
return self._raw_data.get('group02', None)
@property
def Group03(self):
return self._raw_data.get('group03', None)
@property
def Group04(self):
return self._raw_data.get('group04', None)
@property
def Group05(self):
return self._raw_data.get('group05', None)
@property
def Group06(self):
return self._raw_data.get('group06', None)
@property
def Group07(self):
return self._raw_data.get('group07', None)
@property
def Group08(self):
return self._raw_data.get('group08', None)
@property
def Group09(self):
return self._raw_data.get('group09', None)
@property
def Group10(self):
return self._raw_data.get('group10', None)
@property
def Group11(self):
return self._raw_data.get('group11', None)
@property
def Group12(self):
return self._raw_data.get('group12', None)
@property
def Group13(self):
return self._raw_data.get('group13', None)
@property
def Group14(self):
return self._raw_data.get('group14', None)
@property
def Group15(self):
return self._raw_data.get('group15', None)
class func_timescale(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def desiredTimescale(self):
return parse_source_value(self._raw_data.get('desiredtimescale', 1.0))
@property
def acceleration(self):
return parse_source_value(self._raw_data.get('acceleration', 0.05))
@property
def minBlendRate(self):
return parse_source_value(self._raw_data.get('minblendrate', 0.1))
@property
def blendDeltaMultiplier(self):
return parse_source_value(self._raw_data.get('blenddeltamultiplier', 3.0))
class func_block_charge(func_brush):
pass
class info_ambient_mob_start(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_ambient_mob_end(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_ambient_mob(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_item_position(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', None)
@property
def group(self):
return parse_source_value(self._raw_data.get('group', 0))
@property
def rarity(self):
return self._raw_data.get('rarity', "0")
@property
def replace01(self):
return self._raw_data.get('replace01', None)
@property
def replace02(self):
return self._raw_data.get('replace02', None)
@property
def replace03(self):
return self._raw_data.get('replace03', None)
@property
def replace04(self):
return self._raw_data.get('replace04', None)
@property
def replace05(self):
return self._raw_data.get('replace05', None)
@property
def replace06(self):
return self._raw_data.get('replace06', None)
@property
def replace07(self):
return self._raw_data.get('replace07', None)
@property
def replace08(self):
return self._raw_data.get('replace08', None)
@property
def replace09(self):
return self._raw_data.get('replace09', None)
@property
def replace10(self):
return self._raw_data.get('replace10', None)
class info_l4d1_survivor_spawn(Targetname):
model_ = "models/survivors/survivor_biker.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def character(self):
return self._raw_data.get('character', "5")
class env_airstrike_indoors(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def height(self):
return self._raw_data.get('height', "-1")
class env_airstrike_outdoors(Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', "models/props_destruction/general_dest_roof_set.mdl")
@property
def modelgroup(self):
return self._raw_data.get('modelgroup', "")
@property
def sequence1(self):
return self._raw_data.get('sequence1', "")
@property
def sequence2(self):
return self._raw_data.get('sequence2', "")
class point_viewcontrol_multiplayer(Parentname, Angles, Targetname):
viewport_model = "models/editor/camera.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fov(self):
return parse_source_value(self._raw_data.get('fov', 90))
@property
def fov_rate(self):
return parse_source_value(self._raw_data.get('fov_rate', 1.0))
@property
def target_entity(self):
return self._raw_data.get('target_entity', "")
@property
def interp_time(self):
return parse_source_value(self._raw_data.get('interp_time', 1.0))
class point_viewcontrol_survivor(Parentname, Angles, Targetname):
viewport_model = "models/editor/camera.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fov(self):
return parse_source_value(self._raw_data.get('fov', 90))
@property
def fov_rate(self):
return parse_source_value(self._raw_data.get('fov_rate', 1.0))
class point_deathfall_camera(Parentname, Angles, Targetname):
viewport_model = "models/editor/camera.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fov(self):
return parse_source_value(self._raw_data.get('fov', 90))
@property
def fov_rate(self):
return parse_source_value(self._raw_data.get('fov_rate', 1.0))
class logic_choreographed_scene(Targetname):
icon_sprite = "editor/choreo_scene.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def SceneFile(self):
return self._raw_data.get('scenefile', None)
@property
def target1(self):
return self._raw_data.get('target1', None)
@property
def target2(self):
return self._raw_data.get('target2', None)
@property
def target3(self):
return self._raw_data.get('target3', None)
@property
def target4(self):
return self._raw_data.get('target4', None)
@property
def target5(self):
return self._raw_data.get('target5', None)
@property
def target6(self):
return self._raw_data.get('target6', None)
@property
def target7(self):
return self._raw_data.get('target7', None)
@property
def target8(self):
return self._raw_data.get('target8', None)
@property
def busyactor(self):
return self._raw_data.get('busyactor', "1")
@property
def onplayerdeath(self):
return self._raw_data.get('onplayerdeath', "0")
class logic_scene_list_manager(Targetname):
icon_sprite = "editor/choreo_manager.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def scene0(self):
return self._raw_data.get('scene0', "")
@property
def scene1(self):
return self._raw_data.get('scene1', "")
@property
def scene2(self):
return self._raw_data.get('scene2', "")
@property
def scene3(self):
return self._raw_data.get('scene3', "")
@property
def scene4(self):
return self._raw_data.get('scene4', "")
@property
def scene5(self):
return self._raw_data.get('scene5', "")
@property
def scene6(self):
return self._raw_data.get('scene6', "")
@property
def scene7(self):
return self._raw_data.get('scene7', "")
@property
def scene8(self):
return self._raw_data.get('scene8', "")
@property
def scene9(self):
return self._raw_data.get('scene9', "")
@property
def scene10(self):
return self._raw_data.get('scene10', "")
@property
def scene11(self):
return self._raw_data.get('scene11', "")
@property
def scene12(self):
return self._raw_data.get('scene12', "")
@property
def scene13(self):
return self._raw_data.get('scene13', "")
@property
def scene14(self):
return self._raw_data.get('scene14', "")
@property
def scene15(self):
return self._raw_data.get('scene15', "")
class generic_actor(Parentname, BaseNPC):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', None)
@property
def hull_name(self):
return self._raw_data.get('hull_name', "Human")
class prop_car_glass(prop_dynamic):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class prop_car_alarm(prop_physics, EnableDisable):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class func_ladder(Base):
pass
class trigger_auto_crouch(Trigger):
pass
class trigger_active_weapon_detect(Trigger):
@property
def weaponclassname(self):
return self._raw_data.get('weaponclassname', "weapon_dieselcan")
class player_weaponstrip(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
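# Navigation-mesh helper entities: nav blockers and nav attribute regions.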
class NavBlocker(Base):
@property
def teamToBlock(self):
return self._raw_data.get('teamtoblock', "-1")
@property
def affectsFlow(self):
return self._raw_data.get('affectsflow', "0")
class func_nav_blocker(NavBlocker, Targetname):
pass
class func_nav_avoidance_obstacle(EnableDisable, Targetname):
pass
class NavAttributeRegion(Base):
@property
def precise(self):
return self._raw_data.get('precise', "0")
@property
def crouch(self):
return self._raw_data.get('crouch', "0")
@property
def stairs(self):
return self._raw_data.get('stairs', "0")
@property
def remove_attributes(self):
return parse_source_value(self._raw_data.get('remove_attributes', 0))
@property
def tank_only(self):
return self._raw_data.get('tank_only', "0")
@property
def mob_only(self):
return self._raw_data.get('mob_only', "0")
class func_nav_attribute_region(NavAttributeRegion, Targetname):
pass
class point_nav_attribute_region(NavAttributeRegion, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def mins(self):
return parse_float_vector(self._raw_data.get('mins', "-4 -128 -80"))
@property
def maxs(self):
return parse_float_vector(self._raw_data.get('maxs', "4 128 80"))
class func_elevator(RenderFields, Parentname, Origin, Targetname):
@property
def top(self):
return self._raw_data.get('top', None)
@property
def bottom(self):
return self._raw_data.get('bottom', None)
@property
def speed(self):
return parse_source_value(self._raw_data.get('speed', 100))
@property
def acceleration(self):
return parse_source_value(self._raw_data.get('acceleration', 100))
@property
def blockdamage(self):
return parse_source_value(self._raw_data.get('blockdamage', 0))
@property
def startsound(self):
return self._raw_data.get('startsound', None)
@property
def stopsound(self):
return self._raw_data.get('stopsound', None)
@property
def disablesound(self):
return self._raw_data.get('disablesound', None)
class info_elevator_floor(Parentname, Angles, Targetname):
icon_sprite = "editor/info_target.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class logic_director_query(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def minAngerRange(self):
return parse_source_value(self._raw_data.get('minangerrange', 1))
@property
def maxAngerRange(self):
return parse_source_value(self._raw_data.get('maxangerrange', 10))
@property
def noise(self):
return self._raw_data.get('noise', "0")
class info_director(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_game_event_proxy(Targetname):
icon_sprite = "editor/info_game_event_proxy.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def event_name(self):
return self._raw_data.get('event_name', None)
@property
def range(self):
return parse_source_value(self._raw_data.get('range', 50))
class game_scavenge_progress_display(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Max(self):
return parse_source_value(self._raw_data.get('max', 0))
class fog_volume(EnableDisable, Targetname):
@property
def FogName(self):
return self._raw_data.get('fogname', None)
@property
def PostProcessName(self):
return self._raw_data.get('postprocessname', None)
@property
def ColorCorrectionName(self):
return self._raw_data.get('colorcorrectionname', None)
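# Filter entities: restrict which activators (by team, infected class, melee damage or health) can trigger targets.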
class filter_activator_team(BaseFilter):
icon_sprite = "editor/filter_team.vmt"
@property
def filterteam(self):
return self._raw_data.get('filterteam', "2")
class filter_activator_infected_class(BaseFilter):
icon_sprite = "editor/filter_team.vmt"
@property
def filterinfectedclass(self):
return self._raw_data.get('filterinfectedclass', "2")
class filter_melee_damage(BaseFilter):
@property
def damagetype(self):
return self._raw_data.get('damagetype', "64")
class filter_health(BaseFilter):
@property
def adrenalinepresence(self):
return self._raw_data.get('adrenalinepresence', "1")
@property
def healthmin(self):
return parse_source_value(self._raw_data.get('healthmin', 0))
@property
def healthmax(self):
return parse_source_value(self._raw_data.get('healthmax', 100))
class prop_minigun(prop_dynamic_base, EnableDisable):
viewport_model = "models/w_models/weapons/w_minigun.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MaxYaw(self):
return parse_source_value(self._raw_data.get('maxyaw', 90))
@property
def MaxPitch(self):
return parse_source_value(self._raw_data.get('maxpitch', 60))
@property
def MinPitch(self):
return parse_source_value(self._raw_data.get('minpitch', -30))
class prop_mounted_machine_gun(prop_dynamic_base, EnableDisable):
viewport_model = "models/w_models/weapons/50cal.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MaxYaw(self):
return parse_source_value(self._raw_data.get('maxyaw', 90))
@property
def MaxPitch(self):
return parse_source_value(self._raw_data.get('maxpitch', 60))
@property
def MinPitch(self):
return parse_source_value(self._raw_data.get('minpitch', -30))
class prop_health_cabinet(prop_dynamic_base, EnableDisable):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def HealthCount(self):
return parse_source_value(self._raw_data.get('healthcount', 1))
class info_survivor_position(Parentname, Angles, Targetname):
model_ = "models/survivors/survivor_coach.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def Order(self):
return parse_source_value(self._raw_data.get('order', 1))
@property
def SurvivorName(self):
return self._raw_data.get('survivorname', "")
@property
def SurvivorIntroSequence(self):
return self._raw_data.get('survivorintrosequence', "")
@property
def GameMode(self):
return self._raw_data.get('gamemode', "")
@property
def SurvivorConcept(self):
return self._raw_data.get('survivorconcept', "")
@property
def HideWeapons(self):
return self._raw_data.get('hideweapons', "0")
class info_survivor_rescue(PlayerClass, Angles, Targetname):
model_ = "models/survivors/survivor_coach.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def rescueEyePos(self):
return self._raw_data.get('rescueeyepos', None)
@property
def model(self):
return self._raw_data.get('model', "models/editor/playerstart.mdl")
class trigger_finale(Angles, EnableDisable, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', "models/props_misc/german_radio.mdl")
@property
def disableshadows(self):
return self._raw_data.get('disableshadows', "0")
@property
def FirstUseDelay(self):
return parse_source_value(self._raw_data.get('firstusedelay', 0))
@property
def UseDelay(self):
return parse_source_value(self._raw_data.get('usedelay', 0))
@property
def type(self):
return self._raw_data.get('type', "0")
@property
def ScriptFile(self):
return self._raw_data.get('scriptfile', None)
@property
def VersusTravelCompletion(self):
return parse_source_value(self._raw_data.get('versustravelcompletion', 0.2))
@property
def IsSacrificeFinale(self):
return self._raw_data.get('issacrificefinale', "0")
class trigger_standoff(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', "models/props_misc/german_radio.mdl")
@property
def disableshadows(self):
return self._raw_data.get('disableshadows', "0")
@property
def UseDuration(self):
return parse_source_value(self._raw_data.get('useduration', 0))
@property
def UseDelay(self):
return parse_source_value(self._raw_data.get('usedelay', 0))
class info_changelevel(Base):
@property
def targetname(self):
return self._raw_data.get('targetname', None)
@property
def map(self):
return self._raw_data.get('map', None)
@property
def landmark(self):
return self._raw_data.get('landmark', None)
class prop_door_rotating_checkpoint(prop_door_rotating):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_zombie_spawn(Parentname, Angles, Targetname):
model_ = "models/infected/common_male01.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def population(self):
return self._raw_data.get('population', "default")
@property
def offer_tank(self):
return self._raw_data.get('offer_tank', "0")
class info_zombie_border(Parentname, Angles, EnableDisable, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class info_remarkable(Targetname, Origin):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def contextsubject(self):
return self._raw_data.get('contextsubject', "")
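# Weapon, item and upgrade spawn entities; viewport_model holds the editor preview model for each spawner.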
class Weapon(Angles, Targetname):
pass
class WeaponSpawnSingle(Parentname, Studiomodel, Angles, Global, Targetname):
@property
def weaponskin(self):
return parse_source_value(self._raw_data.get('weaponskin', -1))
@property
def glowrange(self):
return parse_source_value(self._raw_data.get('glowrange', 0))
@property
def solid(self):
return self._raw_data.get('solid', "6")
class WeaponSpawn(WeaponSpawnSingle):
@property
def count(self):
return parse_source_value(self._raw_data.get('count', 5))
class weapon_item_spawn(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def weaponskin(self):
return parse_source_value(self._raw_data.get('weaponskin', -1))
@property
def glowrange(self):
return parse_source_value(self._raw_data.get('glowrange', 0))
@property
def item1(self):
return parse_source_value(self._raw_data.get('item1', 1))
@property
def item2(self):
return parse_source_value(self._raw_data.get('item2', 0))
@property
def item3(self):
return parse_source_value(self._raw_data.get('item3', 1))
@property
def item4(self):
return parse_source_value(self._raw_data.get('item4', 1))
@property
def item5(self):
return parse_source_value(self._raw_data.get('item5', 1))
@property
def item6(self):
return parse_source_value(self._raw_data.get('item6', 0))
@property
def item7(self):
return parse_source_value(self._raw_data.get('item7', 0))
@property
def item8(self):
return parse_source_value(self._raw_data.get('item8', 0))
@property
def item11(self):
return parse_source_value(self._raw_data.get('item11', 1))
@property
def item12(self):
return parse_source_value(self._raw_data.get('item12', 0))
@property
def item13(self):
return parse_source_value(self._raw_data.get('item13', 0))
@property
def item16(self):
return parse_source_value(self._raw_data.get('item16', 0))
@property
def item17(self):
return parse_source_value(self._raw_data.get('item17', 0))
@property
def item18(self):
return parse_source_value(self._raw_data.get('item18', 0))
@property
def melee_weapon(self):
return self._raw_data.get('melee_weapon', "")
class upgrade_spawn(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def laser_sight(self):
return parse_source_value(self._raw_data.get('laser_sight', 1))
@property
def upgradepack_incendiary(self):
return parse_source_value(self._raw_data.get('upgradepack_incendiary', 1))
@property
def upgradepack_explosive(self):
return parse_source_value(self._raw_data.get('upgradepack_explosive', 1))
class upgrade_ammo_explosive(Angles, Targetname):
viewport_model = "models/props/terror/exploding_ammo.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def count(self):
return parse_source_value(self._raw_data.get('count', 4))
class upgrade_ammo_incendiary(Angles, Targetname):
viewport_model = "models/props/terror/incendiary_ammo.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def count(self):
return parse_source_value(self._raw_data.get('count', 4))
class upgrade_laser_sight(Angles, Targetname):
viewport_model = "models/w_models/Weapons/w_laser_sights.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_pistol_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_pistol_a.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_pistol_magnum_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_desert_eagle.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_smg_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_smg_uzi.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_pumpshotgun_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_shotgun.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_autoshotgun_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_autoshot_m4super.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_rifle_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_rifle_m16a2.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_hunting_rifle_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_sniper_mini14.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_smg_silenced_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_smg_a.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_shotgun_chrome_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_pumpshotgun_A.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_shotgun_spas_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_shotgun_spas.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_rifle_desert_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_rifle_B.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_rifle_ak47_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_rifle_ak47.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_sniper_military_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_sniper_military.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_chainsaw_spawn(WeaponSpawn):
viewport_model = "models/weapons/melee/w_chainsaw.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_grenade_launcher_spawn(WeaponSpawn):
viewport_model = "models/w_models/weapons/w_grenade_launcher.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_rifle_m60_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_m60.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_smg_mp5_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_smg_mp5.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_rifle_sg552_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_rifle_sg552.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_sniper_awp_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_sniper_awp.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_sniper_scout_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_sniper_scout.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_pipe_bomb_spawn(WeaponSpawn):
viewport_model = "models/w_models/weapons/w_eq_pipebomb.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_molotov_spawn(WeaponSpawn):
viewport_model = "models/w_models/weapons/w_eq_molotov.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_vomitjar_spawn(WeaponSpawn):
viewport_model = "models/w_models/weapons/w_eq_bile_flask.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_first_aid_kit_spawn(WeaponSpawnSingle):
viewport_model = "models/w_models/weapons/w_eq_Medkit.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_pain_pills_spawn(WeaponSpawnSingle):
viewport_model = "models/w_models/weapons/w_eq_painpills.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_adrenaline_spawn(WeaponSpawnSingle):
viewport_model = "models/w_models/weapons/w_eq_adrenaline.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_defibrillator_spawn(WeaponSpawnSingle):
viewport_model = "models/w_models/weapons/w_eq_defibrillator.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_gascan_spawn(WeaponSpawnSingle):
viewport_model = "models/props_junk/gascan001a.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_upgradepack_incendiary_spawn(WeaponSpawnSingle):
viewport_model = "models/w_models/weapons/w_eq_incendiary_ammopack.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_upgradepack_explosive_spawn(WeaponSpawnSingle):
viewport_model = "models/w_models/weapons/w_eq_explosive_ammopack.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_first_aid_kit(WeaponSpawnSingle):
viewport_model = "models/w_models/weapons/w_eq_Medkit.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_grenade_launcher(WeaponSpawnSingle):
viewport_model = "models/w_models/weapons/w_grenade_launcher.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class weapon_melee_spawn(WeaponSpawn):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def melee_weapon(self):
return self._raw_data.get('melee_weapon', "any")
class weapon_scavenge_item_spawn(WeaponSpawnSingle):
viewport_model = "models/props_junk/gascan001a.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def glowstate(self):
return self._raw_data.get('glowstate', "3")
class point_prop_use_target(Targetname, Origin):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def nozzle(self):
return self._raw_data.get('nozzle', None)
class weapon_spawn(WeaponSpawn):
viewport_model = "models/w_models/Weapons/w_rifle_m16a2.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def weapon_selection(self):
return self._raw_data.get('weapon_selection', "any_primary")
@property
def spawn_without_director(self):
return self._raw_data.get('spawn_without_director', "0")
@property
def no_cs_weapons(self):
return self._raw_data.get('no_cs_weapons', "0")
class weapon_ammo_spawn(WeaponSpawn):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
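# Map parameter entities: per-map item spawn densities and cluster settings (with a separate Versus variant).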
class info_map_parameters(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def AmmoDensity(self):
return parse_source_value(self._raw_data.get('ammodensity', 6.48))
@property
def PainPillDensity(self):
return parse_source_value(self._raw_data.get('painpilldensity', 6.48))
@property
def MolotovDensity(self):
return parse_source_value(self._raw_data.get('molotovdensity', 6.48))
@property
def PipeBombDensity(self):
return parse_source_value(self._raw_data.get('pipebombdensity', 6.48))
@property
def PistolDensity(self):
return parse_source_value(self._raw_data.get('pistoldensity', 6.48))
@property
def GasCanDensity(self):
return parse_source_value(self._raw_data.get('gascandensity', 6.48))
@property
def OxygenTankDensity(self):
return parse_source_value(self._raw_data.get('oxygentankdensity', 6.48))
@property
def PropaneTankDensity(self):
return parse_source_value(self._raw_data.get('propanetankdensity', 6.48))
@property
def MeleeWeaponDensity(self):
return parse_source_value(self._raw_data.get('meleeweapondensity', 6.48))
@property
def AdrenalineDensity(self):
return parse_source_value(self._raw_data.get('adrenalinedensity', 6.48))
@property
def DefibrillatorDensity(self):
return parse_source_value(self._raw_data.get('defibrillatordensity', 3.0))
@property
def VomitJarDensity(self):
return parse_source_value(self._raw_data.get('vomitjardensity', 6.48))
@property
def UpgradepackDensity(self):
return parse_source_value(self._raw_data.get('upgradepackdensity', 1.0))
@property
def ChainsawDensity(self):
return parse_source_value(self._raw_data.get('chainsawdensity', 1.0))
@property
def ConfigurableWeaponDensity(self):
return parse_source_value(self._raw_data.get('configurableweapondensity', -1.0))
@property
def ConfigurableWeaponClusterRange(self):
return parse_source_value(self._raw_data.get('configurableweaponclusterrange', 100))
@property
def MagnumDensity(self):
return parse_source_value(self._raw_data.get('magnumdensity', -1.0))
@property
def ItemClusterRange(self):
return parse_source_value(self._raw_data.get('itemclusterrange', 50))
@property
def FinaleItemClusterCount(self):
return parse_source_value(self._raw_data.get('finaleitemclustercount', 3))
class info_map_parameters_versus(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def AmmoDensity(self):
return parse_source_value(self._raw_data.get('ammodensity', 6.48))
@property
def PainPillDensity(self):
return parse_source_value(self._raw_data.get('painpilldensity', 6.48))
@property
def MolotovDensity(self):
return parse_source_value(self._raw_data.get('molotovdensity', 6.48))
@property
def PipeBombDensity(self):
return parse_source_value(self._raw_data.get('pipebombdensity', 6.48))
@property
def PistolDensity(self):
return parse_source_value(self._raw_data.get('pistoldensity', 6.48))
@property
def GasCanDensity(self):
return parse_source_value(self._raw_data.get('gascandensity', 6.48))
@property
def OxygenTankDensity(self):
return parse_source_value(self._raw_data.get('oxygentankdensity', 6.48))
@property
def PropaneTankDensity(self):
return parse_source_value(self._raw_data.get('propanetankdensity', 6.48))
@property
def MeleeWeaponDensity(self):
return parse_source_value(self._raw_data.get('meleeweapondensity', 6.48))
@property
def AdrenalineDensity(self):
return parse_source_value(self._raw_data.get('adrenalinedensity', 6.48))
@property
def DefibrillatorDensity(self):
return parse_source_value(self._raw_data.get('defibrillatordensity', 2.50))
@property
def VomitJarDensity(self):
return parse_source_value(self._raw_data.get('vomitjardensity', 6.48))
@property
def UpgradepackDensity(self):
return parse_source_value(self._raw_data.get('upgradepackdensity', 1.0))
@property
def ChainsawDensity(self):
return parse_source_value(self._raw_data.get('chainsawdensity', 1.0))
@property
def ConfigurableWeaponDensity(self):
return parse_source_value(self._raw_data.get('configurableweapondensity', -1.0))
@property
def ConfigurableWeaponClusterRange(self):
return parse_source_value(self._raw_data.get('configurableweaponclusterrange', 100))
@property
def MagnumDensity(self):
return parse_source_value(self._raw_data.get('magnumdensity', -1.0))
@property
def ItemClusterRange(self):
return parse_source_value(self._raw_data.get('itemclusterrange', 50))
@property
def FinaleItemClusterCount(self):
return parse_source_value(self._raw_data.get('finaleitemclustercount', 3))
class info_gamemode(Angles, Targetname):
icon_sprite = "editor/info_gamemode.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class beam_spotlight(RenderFields, Parentname, Angles, Targetname):
model_ = "models/editor/cone_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def maxspeed(self):
return parse_source_value(self._raw_data.get('maxspeed', 100))
@property
def spotlightlength(self):
return parse_source_value(self._raw_data.get('spotlightlength', 500))
@property
def spotlightwidth(self):
return parse_source_value(self._raw_data.get('spotlightwidth', 50))
@property
def HDRColorScale(self):
return parse_source_value(self._raw_data.get('hdrcolorscale', 0.7))
class env_detail_controller(Angles):
icon_sprite = "editor/env_particles.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fademindist(self):
return parse_source_value(self._raw_data.get('fademindist', 512))
@property
def fademaxdist(self):
return parse_source_value(self._raw_data.get('fademaxdist', 1024))
class info_goal_infected_chase(Parentname, Targetname):
icon_sprite = "editor/info_target.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class func_playerinfected_clip(Parentname, Inputfilter, Shadow, Global, EnableDisable, RenderFields, Targetname):
@property
def Solidity(self):
return self._raw_data.get('solidity', "2")
@property
def vrad_brush_cast_shadows(self):
return self._raw_data.get('vrad_brush_cast_shadows', "0")
class func_playerghostinfected_clip(Parentname, Inputfilter, Shadow, Global, EnableDisable, RenderFields, Targetname):
@property
def Solidity(self):
return self._raw_data.get('solidity', "2")
@property
def vrad_brush_cast_shadows(self):
return self._raw_data.get('vrad_brush_cast_shadows', "0")
class commentary_dummy(Angles, Targetname):
@property
def model(self):
return self._raw_data.get('model', "models/survivors/survivor_coach.mdl")
@property
def EyeHeight(self):
return parse_source_value(self._raw_data.get('eyeheight', 64))
@property
def StartingAnim(self):
return self._raw_data.get('startinganim', "Idle_Calm_Pistol")
@property
def StartingWeapons(self):
return self._raw_data.get('startingweapons', "weapon_pistol")
@property
def LookAtPlayers(self):
return self._raw_data.get('lookatplayers', "0")
@property
def HeadYawPoseParam(self):
return self._raw_data.get('headyawposeparam', "Head_Yaw")
@property
def HeadPitchPoseParam(self):
return self._raw_data.get('headpitchposeparam', "Head_Pitch")
class commentary_zombie_spawner(Parentname, Angles, Targetname):
model_ = "models/infected/smoker.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_outtro_stats(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class trigger_hurt_ghost(Trigger):
@property
def master(self):
return self._raw_data.get('master', None)
@property
def damage(self):
return parse_source_value(self._raw_data.get('damage', 10))
@property
def damagecap(self):
return parse_source_value(self._raw_data.get('damagecap', 20))
@property
def damagetype(self):
return self._raw_data.get('damagetype', "0")
@property
def damagemodel(self):
return self._raw_data.get('damagemodel', "0")
@property
def nodmgforce(self):
return self._raw_data.get('nodmgforce', "0")
class func_nav_connection_blocker(Parentname, Targetname):
pass
class env_player_blocker(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def mins(self):
return parse_float_vector(self._raw_data.get('mins', "-4 -128 -80"))
@property
def maxs(self):
return parse_float_vector(self._raw_data.get('maxs', "4 128 80"))
@property
def initialstate(self):
return self._raw_data.get('initialstate', "1")
@property
def BlockType(self):
return self._raw_data.get('blocktype', "0")
class env_physics_blocker(Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def boxmins(self):
return parse_float_vector(self._raw_data.get('boxmins', "-8 -8 -8"))
@property
def boxmaxs(self):
return parse_float_vector(self._raw_data.get('boxmaxs', "8 8 8"))
@property
def initialstate(self):
return self._raw_data.get('initialstate', "1")
@property
def BlockType(self):
return self._raw_data.get('blocktype', "0")
class trigger_upgrade_laser_sight(Trigger):
pass
class logic_game_event(Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def eventName(self):
return self._raw_data.get('eventname', "")
class func_button_timed(Parentname, DamageFilter, Origin, RenderFields, Targetname):
@property
def use_time(self):
return parse_source_value(self._raw_data.get('use_time', 5))
@property
def use_string(self):
return self._raw_data.get('use_string', "Using....")
@property
def use_sub_string(self):
return self._raw_data.get('use_sub_string', "")
@property
def glow(self):
return self._raw_data.get('glow', None)
@property
def auto_disable(self):
return self._raw_data.get('auto_disable', "1")
@property
def locked_sound(self):
return self._raw_data.get('locked_sound', "0")
class prop_fuel_barrel(Studiomodel, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def fademindist(self):
return parse_source_value(self._raw_data.get('fademindist', -1))
@property
def fademaxdist(self):
return parse_source_value(self._raw_data.get('fademaxdist', 0))
@property
def fadescale(self):
return parse_source_value(self._raw_data.get('fadescale', 1))
@property
def BasePiece(self):
return self._raw_data.get('basepiece', "models/props_industrial/barrel_fuel_partb.mdl")
@property
def FlyingPiece01(self):
return self._raw_data.get('flyingpiece01', "models/props_industrial/barrel_fuel_parta.mdl")
@property
def FlyingPiece02(self):
return self._raw_data.get('flyingpiece02', "")
@property
def FlyingPiece03(self):
return self._raw_data.get('flyingpiece03', "")
@property
def FlyingPiece04(self):
return self._raw_data.get('flyingpiece04', "")
@property
def DetonateParticles(self):
return self._raw_data.get('detonateparticles', "weapon_pipebomb")
@property
def FlyingParticles(self):
return self._raw_data.get('flyingparticles', "barrel_fly")
@property
def DetonateSound(self):
return self._raw_data.get('detonatesound', "BaseGrenade.Explode")
class logic_versus_random(Targetname):
icon_sprite = "editor/logic_auto.vmt"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
class env_weaponfire(Parentname, Angles, EnableDisable, Targetname):
model_ = "models/editor/cone_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def TargetArc(self):
return parse_source_value(self._raw_data.get('targetarc', 40))
@property
def TargetRange(self):
return parse_source_value(self._raw_data.get('targetrange', 3600))
@property
def filtername(self):
return self._raw_data.get('filtername', None)
@property
def DamageMod(self):
return parse_source_value(self._raw_data.get('damagemod', 1.0))
@property
def WeaponType(self):
return self._raw_data.get('weapontype', "1")
@property
def TargetTeam(self):
return self._raw_data.get('targetteam', "3")
@property
def IgnorePlayers(self):
return self._raw_data.get('ignoreplayers', "0")
class env_rock_launcher(Angles, Targetname):
model_ = "models/editor/cone_helper.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def RockTargetName(self):
return self._raw_data.get('rocktargetname', None)
@property
def RockDamageOverride(self):
return parse_source_value(self._raw_data.get('rockdamageoverride', 0))
class func_extinguisher(EnableDisable, Targetname):
pass
class func_ragdoll_fader(EnableDisable, Targetname):
pass
class prop_minigun_l4d1(prop_dynamic_base, EnableDisable):
viewport_model = "models/w_models/weapons/w_minigun.mdl"
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def MaxYaw(self):
return parse_source_value(self._raw_data.get('maxyaw', 90))
@property
def MaxPitch(self):
return parse_source_value(self._raw_data.get('maxpitch', 60))
@property
def MinPitch(self):
return parse_source_value(self._raw_data.get('minpitch', -30))
class trigger_escape(Trigger):
pass
class func_buildable_button(Parentname, Origin, Targetname):
@property
def is_cumulative_use(self):
return self._raw_data.get('is_cumulative_use', "0")
class point_script_use_target(Targetname, Origin):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
@property
def model(self):
return self._raw_data.get('model', None)
class scripted_item_drop(Studiomodel, Parentname, Angles, Targetname):
@property
def origin(self):
return parse_int_vector(self._raw_data.get('origin', "0 0 0"))
pass
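# Maps entity classnames found in map entity data to the handler classes defined in this module.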
entity_class_handle = {
'Angles': Angles,
'Origin': Origin,
'Studiomodel': Studiomodel,
'BasePlat': BasePlat,
'Targetname': Targetname,
'Parentname': Parentname,
'BaseBrush': BaseBrush,
'EnableDisable': EnableDisable,
'RenderFxChoices': RenderFxChoices,
'Shadow': Shadow,
'Glow': Glow,
'SystemLevelChoice': SystemLevelChoice,
'RenderFields': RenderFields,
'Inputfilter': Inputfilter,
'Global': Global,
'EnvGlobal': EnvGlobal,
'DamageFilter': DamageFilter,
'ResponseContext': ResponseContext,
'Breakable': Breakable,
'BreakableBrush': BreakableBrush,
'BreakableProp': BreakableProp,
'BaseNPC': BaseNPC,
'info_npc_spawn_destination': info_npc_spawn_destination,
'BaseNPCMaker': BaseNPCMaker,
'npc_template_maker': npc_template_maker,
'BaseHelicopter': BaseHelicopter,
'PlayerClass': PlayerClass,
'Light': Light,
'Node': Node,
'HintNode': HintNode,
'TriggerOnce': TriggerOnce,
'Trigger': Trigger,
'worldbase': worldbase,
'worldspawn': worldspawn,
'ambient_generic': ambient_generic,
'ambient_music': ambient_music,
'sound_mix_layer': sound_mix_layer,
'func_lod': func_lod,
'env_zoom': env_zoom,
'env_screenoverlay': env_screenoverlay,
'env_screeneffect': env_screeneffect,
'env_texturetoggle': env_texturetoggle,
'env_splash': env_splash,
'env_particlelight': env_particlelight,
'env_sun': env_sun,
'game_ragdoll_manager': game_ragdoll_manager,
'game_gib_manager': game_gib_manager,
'env_dof_controller': env_dof_controller,
'env_lightglow': env_lightglow,
'env_smokestack': env_smokestack,
'env_fade': env_fade,
'env_player_surface_trigger': env_player_surface_trigger,
'trigger_tonemap': trigger_tonemap,
'env_tonemap_controller': env_tonemap_controller,
'env_tonemap_controller_infected': env_tonemap_controller_infected,
'env_tonemap_controller_ghost': env_tonemap_controller_ghost,
'func_useableladder': func_useableladder,
'func_ladderendpoint': func_ladderendpoint,
'info_ladder_dismount': info_ladder_dismount,
'func_areaportalwindow': func_areaportalwindow,
'func_wall': func_wall,
'func_clip_vphysics': func_clip_vphysics,
'func_brush': func_brush,
'vgui_screen_base': vgui_screen_base,
'vgui_screen': vgui_screen,
'vgui_slideshow_display': vgui_slideshow_display,
'cycler': cycler,
'func_orator': func_orator,
'gibshooterbase': gibshooterbase,
'env_beam': env_beam,
'env_beverage': env_beverage,
'env_embers': env_embers,
'env_funnel': env_funnel,
'env_blood': env_blood,
'env_bubbles': env_bubbles,
'env_explosion': env_explosion,
'env_smoketrail': env_smoketrail,
'env_physexplosion': env_physexplosion,
'env_physimpact': env_physimpact,
'env_fire': env_fire,
'env_firesource': env_firesource,
'env_firesensor': env_firesensor,
'env_entity_igniter': env_entity_igniter,
'env_fog_controller': env_fog_controller,
'postprocess_controller': postprocess_controller,
'env_steam': env_steam,
'env_laser': env_laser,
'env_message': env_message,
'env_hudhint': env_hudhint,
'env_shake': env_shake,
'env_viewpunch': env_viewpunch,
'env_rotorwash_emitter': env_rotorwash_emitter,
'gibshooter': gibshooter,
'env_shooter': env_shooter,
'env_rotorshooter': env_rotorshooter,
'env_soundscape_proxy': env_soundscape_proxy,
'env_soundscape': env_soundscape,
'env_soundscape_triggerable': env_soundscape_triggerable,
'env_spark': env_spark,
'env_sprite': env_sprite,
'env_sprite_oriented': env_sprite_oriented,
'env_wind': env_wind,
'sky_camera': sky_camera,
'BaseSpeaker': BaseSpeaker,
'game_weapon_manager': game_weapon_manager,
'game_end': game_end,
'game_player_equip': game_player_equip,
'game_player_team': game_player_team,
'game_score': game_score,
'game_text': game_text,
'point_enable_motion_fixup': point_enable_motion_fixup,
'point_message': point_message,
'point_spotlight': point_spotlight,
'point_tesla': point_tesla,
'point_clientcommand': point_clientcommand,
'point_servercommand': point_servercommand,
'point_broadcastclientcommand': point_broadcastclientcommand,
'point_bonusmaps_accessor': point_bonusmaps_accessor,
'game_ui': game_ui,
'point_entity_finder': point_entity_finder,
'game_zone_player': game_zone_player,
'infodecal': infodecal,
'info_projecteddecal': info_projecteddecal,
'info_no_dynamic_shadow': info_no_dynamic_shadow,
'info_player_start': info_player_start,
'info_overlay': info_overlay,
'info_overlay_transition': info_overlay_transition,
'info_intermission': info_intermission,
'info_landmark': info_landmark,
'info_null': info_null,
'info_target': info_target,
'info_particle_target': info_particle_target,
'info_particle_system': info_particle_system,
'phys_ragdollmagnet': phys_ragdollmagnet,
'info_lighting': info_lighting,
'info_teleport_destination': info_teleport_destination,
'info_node': info_node,
'info_node_hint': info_node_hint,
'info_node_air': info_node_air,
'info_node_air_hint': info_node_air_hint,
'info_hint': info_hint,
'info_node_link': info_node_link,
'info_node_link_controller': info_node_link_controller,
'info_radial_link_controller': info_radial_link_controller,
'info_node_climb': info_node_climb,
'light': light,
'light_environment': light_environment,
'light_directional': light_directional,
'light_spot': light_spot,
'light_dynamic': light_dynamic,
'shadow_control': shadow_control,
'color_correction': color_correction,
'color_correction_volume': color_correction_volume,
'KeyFrame': KeyFrame,
'Mover': Mover,
'func_movelinear': func_movelinear,
'func_water_analog': func_water_analog,
'func_rotating': func_rotating,
'func_platrot': func_platrot,
'keyframe_track': keyframe_track,
'move_keyframed': move_keyframed,
'move_track': move_track,
'RopeKeyFrame': RopeKeyFrame,
'keyframe_rope': keyframe_rope,
'move_rope': move_rope,
'Button': Button,
'func_button': func_button,
'func_rot_button': func_rot_button,
'momentary_rot_button': momentary_rot_button,
'Door': Door,
'func_door': func_door,
'func_door_rotating': func_door_rotating,
'BaseFadeProp': BaseFadeProp,
'prop_door_rotating': prop_door_rotating,
'prop_wall_breakable': prop_wall_breakable,
'env_cubemap': env_cubemap,
'BModelParticleSpawner': BModelParticleSpawner,
'func_dustmotes': func_dustmotes,
'func_smokevolume': func_smokevolume,
'func_dustcloud': func_dustcloud,
'env_dustpuff': env_dustpuff,
'env_particlescript': env_particlescript,
'env_effectscript': env_effectscript,
'logic_auto': logic_auto,
'point_viewcontrol': point_viewcontrol,
'point_posecontroller': point_posecontroller,
'logic_compare': logic_compare,
'logic_branch': logic_branch,
'logic_branch_listener': logic_branch_listener,
'logic_case': logic_case,
'logic_multicompare': logic_multicompare,
'logic_relay': logic_relay,
'logic_timer': logic_timer,
'hammer_updateignorelist': hammer_updateignorelist,
'logic_collision_pair': logic_collision_pair,
'env_microphone': env_microphone,
'math_remap': math_remap,
'math_colorblend': math_colorblend,
'math_counter': math_counter,
'logic_lineto': logic_lineto,
'logic_navigation': logic_navigation,
'logic_autosave': logic_autosave,
'logic_active_autosave': logic_active_autosave,
'point_template': point_template,
'env_entity_maker': env_entity_maker,
'BaseFilter': BaseFilter,
'filter_multi': filter_multi,
'filter_activator_name': filter_activator_name,
'filter_activator_model': filter_activator_model,
'filter_activator_context': filter_activator_context,
'filter_activator_class': filter_activator_class,
'filter_activator_mass_greater': filter_activator_mass_greater,
'filter_damage_type': filter_damage_type,
'filter_enemy': filter_enemy,
'point_anglesensor': point_anglesensor,
'point_angularvelocitysensor': point_angularvelocitysensor,
'point_velocitysensor': point_velocitysensor,
'point_proximity_sensor': point_proximity_sensor,
'point_teleport': point_teleport,
'point_hurt': point_hurt,
'point_playermoveconstraint': point_playermoveconstraint,
'point_push': point_push,
'func_physbox': func_physbox,
'TwoObjectPhysics': TwoObjectPhysics,
'phys_constraintsystem': phys_constraintsystem,
'phys_keepupright': phys_keepupright,
'physics_cannister': physics_cannister,
'info_constraint_anchor': info_constraint_anchor,
'info_mass_center': info_mass_center,
'phys_spring': phys_spring,
'phys_hinge': phys_hinge,
'phys_ballsocket': phys_ballsocket,
'phys_constraint': phys_constraint,
'phys_pulleyconstraint': phys_pulleyconstraint,
'phys_slideconstraint': phys_slideconstraint,
'phys_lengthconstraint': phys_lengthconstraint,
'phys_ragdollconstraint': phys_ragdollconstraint,
'phys_convert': phys_convert,
'ForceController': ForceController,
'phys_thruster': phys_thruster,
'phys_torque': phys_torque,
'phys_motor': phys_motor,
'phys_magnet': phys_magnet,
'prop_detail_base': prop_detail_base,
'prop_static_base': prop_static_base,
'prop_dynamic_base': prop_dynamic_base,
'prop_detail': prop_detail,
'prop_static': prop_static,
'prop_dynamic': prop_dynamic,
'prop_dynamic_override': prop_dynamic_override,
'BasePropPhysics': BasePropPhysics,
'prop_physics_override': prop_physics_override,
'prop_physics': prop_physics,
'prop_physics_multiplayer': prop_physics_multiplayer,
'prop_ragdoll': prop_ragdoll,
'prop_dynamic_ornament': prop_dynamic_ornament,
'func_areaportal': func_areaportal,
'func_occluder': func_occluder,
'func_breakable': func_breakable,
'func_breakable_surf': func_breakable_surf,
'func_conveyor': func_conveyor,
'func_detail': func_detail,
'func_viscluster': func_viscluster,
'func_illusionary': func_illusionary,
'func_precipitation': func_precipitation,
'func_precipitation_blocker': func_precipitation_blocker,
'func_detail_blocker': func_detail_blocker,
'func_wall_toggle': func_wall_toggle,
'func_guntarget': func_guntarget,
'func_fish_pool': func_fish_pool,
'PlatSounds': PlatSounds,
'Trackchange': Trackchange,
'BaseTrain': BaseTrain,
'func_trackautochange': func_trackautochange,
'func_trackchange': func_trackchange,
'func_tracktrain': func_tracktrain,
'func_tanktrain': func_tanktrain,
'func_traincontrols': func_traincontrols,
'tanktrain_aitarget': tanktrain_aitarget,
'tanktrain_ai': tanktrain_ai,
'path_track': path_track,
'test_traceline': test_traceline,
'trigger_autosave': trigger_autosave,
'trigger_changelevel': trigger_changelevel,
'trigger_gravity': trigger_gravity,
'trigger_playermovement': trigger_playermovement,
'trigger_soundscape': trigger_soundscape,
'trigger_hurt': trigger_hurt,
'trigger_remove': trigger_remove,
'trigger_multiple': trigger_multiple,
'trigger_once': trigger_once,
'trigger_look': trigger_look,
'trigger_push': trigger_push,
'trigger_wind': trigger_wind,
'trigger_impact': trigger_impact,
'trigger_proximity': trigger_proximity,
'trigger_teleport': trigger_teleport,
'trigger_transition': trigger_transition,
'trigger_serverragdoll': trigger_serverragdoll,
'ai_speechfilter': ai_speechfilter,
'water_lod_control': water_lod_control,
'info_camera_link': info_camera_link,
'logic_measure_movement': logic_measure_movement,
'npc_furniture': npc_furniture,
'env_credits': env_credits,
'material_modify_control': material_modify_control,
'point_devshot_camera': point_devshot_camera,
'logic_playerproxy': logic_playerproxy,
'env_spritetrail': env_spritetrail,
'env_projectedtexture': env_projectedtexture,
'func_reflective_glass': func_reflective_glass,
'env_particle_performance_monitor': env_particle_performance_monitor,
'npc_puppet': npc_puppet,
'point_gamestats_counter': point_gamestats_counter,
'func_instance': func_instance,
'func_instance_parms': func_instance_parms,
'env_instructor_hint': env_instructor_hint,
'info_target_instructor_hint': info_target_instructor_hint,
'logic_script': logic_script,
'func_timescale': func_timescale,
'func_block_charge': func_block_charge,
'info_ambient_mob_start': info_ambient_mob_start,
'info_ambient_mob_end': info_ambient_mob_end,
'info_ambient_mob': info_ambient_mob,
'info_item_position': info_item_position,
'info_l4d1_survivor_spawn': info_l4d1_survivor_spawn,
'env_airstrike_indoors': env_airstrike_indoors,
'env_airstrike_outdoors': env_airstrike_outdoors,
'point_viewcontrol_multiplayer': point_viewcontrol_multiplayer,
'point_viewcontrol_survivor': point_viewcontrol_survivor,
'point_deathfall_camera': point_deathfall_camera,
'logic_choreographed_scene': logic_choreographed_scene,
'logic_scene_list_manager': logic_scene_list_manager,
'generic_actor': generic_actor,
'prop_car_glass': prop_car_glass,
'prop_car_alarm': prop_car_alarm,
'func_ladder': func_ladder,
'trigger_auto_crouch': trigger_auto_crouch,
'trigger_active_weapon_detect': trigger_active_weapon_detect,
'player_weaponstrip': player_weaponstrip,
'NavBlocker': NavBlocker,
'func_nav_blocker': func_nav_blocker,
'func_nav_avoidance_obstacle': func_nav_avoidance_obstacle,
'NavAttributeRegion': NavAttributeRegion,
'func_nav_attribute_region': func_nav_attribute_region,
'point_nav_attribute_region': point_nav_attribute_region,
'func_elevator': func_elevator,
'info_elevator_floor': info_elevator_floor,
'logic_director_query': logic_director_query,
'info_director': info_director,
'info_game_event_proxy': info_game_event_proxy,
'game_scavenge_progress_display': game_scavenge_progress_display,
'fog_volume': fog_volume,
'filter_activator_team': filter_activator_team,
'filter_activator_infected_class': filter_activator_infected_class,
'filter_melee_damage': filter_melee_damage,
'filter_health': filter_health,
'prop_minigun': prop_minigun,
'prop_mounted_machine_gun': prop_mounted_machine_gun,
'prop_health_cabinet': prop_health_cabinet,
'info_survivor_position': info_survivor_position,
'info_survivor_rescue': info_survivor_rescue,
'trigger_finale': trigger_finale,
'trigger_standoff': trigger_standoff,
'info_changelevel': info_changelevel,
'prop_door_rotating_checkpoint': prop_door_rotating_checkpoint,
'info_zombie_spawn': info_zombie_spawn,
'info_zombie_border': info_zombie_border,
'info_remarkable': info_remarkable,
'Weapon': Weapon,
'WeaponSpawnSingle': WeaponSpawnSingle,
'WeaponSpawn': WeaponSpawn,
'weapon_item_spawn': weapon_item_spawn,
'upgrade_spawn': upgrade_spawn,
'upgrade_ammo_explosive': upgrade_ammo_explosive,
'upgrade_ammo_incendiary': upgrade_ammo_incendiary,
'upgrade_laser_sight': upgrade_laser_sight,
'weapon_pistol_spawn': weapon_pistol_spawn,
'weapon_pistol_magnum_spawn': weapon_pistol_magnum_spawn,
'weapon_smg_spawn': weapon_smg_spawn,
'weapon_pumpshotgun_spawn': weapon_pumpshotgun_spawn,
'weapon_autoshotgun_spawn': weapon_autoshotgun_spawn,
'weapon_rifle_spawn': weapon_rifle_spawn,
'weapon_hunting_rifle_spawn': weapon_hunting_rifle_spawn,
'weapon_smg_silenced_spawn': weapon_smg_silenced_spawn,
'weapon_shotgun_chrome_spawn': weapon_shotgun_chrome_spawn,
'weapon_shotgun_spas_spawn': weapon_shotgun_spas_spawn,
'weapon_rifle_desert_spawn': weapon_rifle_desert_spawn,
'weapon_rifle_ak47_spawn': weapon_rifle_ak47_spawn,
'weapon_sniper_military_spawn': weapon_sniper_military_spawn,
'weapon_chainsaw_spawn': weapon_chainsaw_spawn,
'weapon_grenade_launcher_spawn': weapon_grenade_launcher_spawn,
'weapon_rifle_m60_spawn': weapon_rifle_m60_spawn,
'weapon_smg_mp5_spawn': weapon_smg_mp5_spawn,
'weapon_rifle_sg552_spawn': weapon_rifle_sg552_spawn,
'weapon_sniper_awp_spawn': weapon_sniper_awp_spawn,
'weapon_sniper_scout_spawn': weapon_sniper_scout_spawn,
'weapon_pipe_bomb_spawn': weapon_pipe_bomb_spawn,
'weapon_molotov_spawn': weapon_molotov_spawn,
'weapon_vomitjar_spawn': weapon_vomitjar_spawn,
'weapon_first_aid_kit_spawn': weapon_first_aid_kit_spawn,
'weapon_pain_pills_spawn': weapon_pain_pills_spawn,
'weapon_adrenaline_spawn': weapon_adrenaline_spawn,
'weapon_defibrillator_spawn': weapon_defibrillator_spawn,
'weapon_gascan_spawn': weapon_gascan_spawn,
'weapon_upgradepack_incendiary_spawn': weapon_upgradepack_incendiary_spawn,
'weapon_upgradepack_explosive_spawn': weapon_upgradepack_explosive_spawn,
'weapon_first_aid_kit': weapon_first_aid_kit,
'weapon_grenade_launcher': weapon_grenade_launcher,
'weapon_melee_spawn': weapon_melee_spawn,
'weapon_scavenge_item_spawn': weapon_scavenge_item_spawn,
'point_prop_use_target': point_prop_use_target,
'weapon_spawn': weapon_spawn,
'weapon_ammo_spawn': weapon_ammo_spawn,
'info_map_parameters': info_map_parameters,
'info_map_parameters_versus': info_map_parameters_versus,
'info_gamemode': info_gamemode,
'beam_spotlight': beam_spotlight,
'env_detail_controller': env_detail_controller,
'info_goal_infected_chase': info_goal_infected_chase,
'func_playerinfected_clip': func_playerinfected_clip,
'func_playerghostinfected_clip': func_playerghostinfected_clip,
'commentary_dummy': commentary_dummy,
'commentary_zombie_spawner': commentary_zombie_spawner,
'env_outtro_stats': env_outtro_stats,
'trigger_hurt_ghost': trigger_hurt_ghost,
'func_nav_connection_blocker': func_nav_connection_blocker,
'env_player_blocker': env_player_blocker,
'env_physics_blocker': env_physics_blocker,
'trigger_upgrade_laser_sight': trigger_upgrade_laser_sight,
'logic_game_event': logic_game_event,
'func_button_timed': func_button_timed,
'prop_fuel_barrel': prop_fuel_barrel,
'logic_versus_random': logic_versus_random,
'env_weaponfire': env_weaponfire,
'env_rock_launcher': env_rock_launcher,
'func_extinguisher': func_extinguisher,
'func_ragdoll_fader': func_ragdoll_fader,
'prop_minigun_l4d1': prop_minigun_l4d1,
'trigger_escape': trigger_escape,
'func_buildable_button': func_buildable_button,
'point_script_use_target': point_script_use_target,
'scripted_item_drop': scripted_item_drop,
} | 25.502219 | 118 | 0.674537 |
7944026a4bb6969e78236a9208d9fb93543bbca3 | 15,408 | py | Python | billforward/apis/emails_api.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 2 | 2016-11-23T17:32:37.000Z | 2022-02-24T05:13:20.000Z | billforward/apis/emails_api.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | null | null | null | billforward/apis/emails_api.py | billforward/bf-python | d2b812329ca3ed1fd94364d7f46f69ad74665596 | [
"Apache-2.0"
] | 1 | 2016-12-30T20:02:48.000Z | 2016-12-30T20:02:48.000Z | # coding: utf-8
"""
BillForward REST API
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class EmailsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_email_by_id(self, email_id, **kwargs):
"""
        Retrieves a single email, specified by the email-ID parameter.
{ \"nickname\" : \"Retrieve by version\",\"response\" : \"getEmailByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_email_by_id(email_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email_id: (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: EmailPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_email_by_id_with_http_info(email_id, **kwargs)
else:
(data) = self.get_email_by_id_with_http_info(email_id, **kwargs)
return data
def get_email_by_id_with_http_info(self, email_id, **kwargs):
"""
        Retrieves a single email, specified by the email-ID parameter.
{ \"nickname\" : \"Retrieve by version\",\"response\" : \"getEmailByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_email_by_id_with_http_info(email_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email_id: (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: EmailPagedMetadata
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_email_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email_id' is set
if ('email_id' not in params) or (params['email_id'] is None):
raise ValueError("Missing the required parameter `email_id` when calling `get_email_by_id`")
resource_path = '/emails/{email-ID}'.replace('{format}', 'json')
path_params = {}
if 'email_id' in params:
path_params['email-ID'] = params['email_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'text/xml'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='EmailPagedMetadata',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_email_html_by_id(self, email_id, **kwargs):
"""
        Retrieves the HTML body of a single email, specified by the email-ID parameter.
{ \"nickname\" : \"Retrieve by version\",\"response\" : \"getEmailByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_email_html_by_id(email_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email_id: (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_email_html_by_id_with_http_info(email_id, **kwargs)
else:
(data) = self.get_email_html_by_id_with_http_info(email_id, **kwargs)
return data
def get_email_html_by_id_with_http_info(self, email_id, **kwargs):
"""
        Retrieves the HTML body of a single email, specified by the email-ID parameter.
{ \"nickname\" : \"Retrieve by version\",\"response\" : \"getEmailByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_email_html_by_id_with_http_info(email_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email_id: (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_email_html_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email_id' is set
if ('email_id' not in params) or (params['email_id'] is None):
raise ValueError("Missing the required parameter `email_id` when calling `get_email_html_by_id`")
resource_path = '/emails/{email-ID}.html'.replace('{format}', 'json')
path_params = {}
if 'email_id' in params:
path_params['email-ID'] = params['email_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/html'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
def get_email_text_by_id(self, email_id, **kwargs):
"""
        Retrieves the plain-text body of a single email, specified by the email-ID parameter.
{ \"nickname\" : \"Retrieve by version\",\"response\" : \"getEmailByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_email_text_by_id(email_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email_id: (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_email_text_by_id_with_http_info(email_id, **kwargs)
else:
(data) = self.get_email_text_by_id_with_http_info(email_id, **kwargs)
return data
def get_email_text_by_id_with_http_info(self, email_id, **kwargs):
"""
        Retrieves the plain-text body of a single email, specified by the email-ID parameter.
{ \"nickname\" : \"Retrieve by version\",\"response\" : \"getEmailByID.html\"}
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_email_text_by_id_with_http_info(email_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str email_id: (required)
:param list[str] organizations: A list of organization-IDs used to restrict the scope of API calls.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['email_id', 'organizations']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_email_text_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'email_id' is set
if ('email_id' not in params) or (params['email_id'] is None):
raise ValueError("Missing the required parameter `email_id` when calling `get_email_text_by_id`")
resource_path = '/emails/{email-ID}.txt'.replace('{format}', 'json')
path_params = {}
if 'email_id' in params:
path_params['email-ID'] = params['email_id']
query_params = {}
if 'organizations' in params:
query_params['organizations'] = params['organizations']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['text/plain'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['text/plain'])
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'))
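# Usage sketch (not part of the generated client): the email ID below is a
# placeholder, and a Configuration with valid credentials/host is assumed.
#   api = EmailsApi()
#   email_meta = api.get_email_by_id('EMAIL-ID')
#   html_body = api.get_email_html_by_id('EMAIL-ID')
#   text_body = api.get_email_text_by_id('EMAIL-ID')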
| 40.978723 | 109 | 0.580802 |
7944029fe67360695f7e7e7d732ef9a7d83601a6 | 4,525 | py | Python | bin/txt2params.py | haampie/rocprofiler | 9b7edb39207cb040093ffdd7d99c9d6e56807c95 | [
"MIT"
] | 44 | 2018-12-21T14:09:30.000Z | 2022-03-18T15:11:32.000Z | bin/txt2params.py | haampie/rocprofiler | 9b7edb39207cb040093ffdd7d99c9d6e56807c95 | [
"MIT"
] | 61 | 2019-01-22T00:35:06.000Z | 2022-03-31T13:33:28.000Z | bin/txt2params.py | haampie/rocprofiler | 9b7edb39207cb040093ffdd7d99c9d6e56807c95 | [
"MIT"
] | 26 | 2019-06-04T16:38:39.000Z | 2022-01-21T21:29:55.000Z | ################################################################################
# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
################################################################################
import os, sys, re
# gen_params() takes a text file like the output of rocminfo cmd and parses it into a map {key,value}
# where key is the param and value is the value of this param
# for example: Threadmodel : "posix"
# it also processes encompassing sections to generate a full param name such as (section names joined by '.'):
# "Agent2.PoolInfo.ISAInfo.ISA1.WorkgroupMaxSizeperDimension.x": "1024(0x400)",
def gen_params(txtfile):
fields = {}
counter = 0
parent_field = ''
nbr_indent = 0
nbr_indent_prev = 0
check_for_dims = False
with open(txtfile) as fp:
for line in fp:
me = re.match(r'\*\*\* Done \*\*\*',line) #Marks the end of cmd
if me:
parent_field = ''
nbr_indent = 0
nbr_indent_prev = 0
check_for_dims = False
continue
mv = re.match(r'HCC clang version\s+(.*)',line) # outlier: only line with a version number and no ':', special case
if mv:
key = 'HCCclangversion'
val = mv.group(1)
counter = counter + 1
fields[(counter,key)] = val
continue
# Variable 'check_for_dims' is True for text like this:
# Workgroup Max Size per Dimension:
# x 1024(0x400)
# y 1024(0x400)
# z 1024(0x400)
if check_for_dims == True:
mc = re.match(r'\s*([x|y|z])\s+(.*)',line)
if mc:
key_sav = mc.group(1)
if parent_field != '':
key = parent_field + '.' + mc.group(1)
else:
key = mc.group(1)
val = re.sub(r"\s+", "", mc.group(2))
counter = counter + 1
fields[(counter,key)] = val
if key_sav == 'z':
check_for_dims = False
nbr_indent_prev = nbr_indent
mi = re.search(r'^(\s+)\w+.*', line)
md = re.search(':', line)
if mi:
nbr_indent = int(len(mi.group(1)) / 2) #indentation cnt
else:
if not md:
tmp = re.sub(r"\s+", "", line)
if tmp.isalnum():
parent_field = tmp
if nbr_indent < nbr_indent_prev:
go_back_parent = (nbr_indent_prev - nbr_indent)
for i in range(go_back_parent): #decrease as many levels up as needed
pos = parent_field.rfind('.')
if pos != -1:
parent_field = parent_field[:pos]
# Process lines such as :
# Segment: GLOBAL; FLAGS: KERNARG, FINE GRAINED
# Size: 131897644(0x7dc992c) KB
for lin in line.split(';'):
lin = re.sub(r"\s+", "", lin)
m = re.match(r'(.*):(.*)', lin)
if m:
key, val = m.group(1), m.group(2)
if parent_field != '':
key = parent_field + '.' + key
if val == '':
mk = re.match(r'.*Dimension',key)
if mk: # expect x,y,z on next 3 lines
check_for_dims = True
parent_field = key
else:
counter = counter + 1
fields[(counter,key)] = val
else:
if nbr_indent != nbr_indent_prev and not check_for_dims :
parent_field = parent_field + '.' + lin.replace(':','')
return fields
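# Usage sketch (assumes "rocminfo.txt" holds captured `rocminfo` output; the
# filename is illustrative only):
#   params = gen_params('rocminfo.txt')
#   for (order, key), value in sorted(params.items()):
#       print(key, '=', value)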
| 40.765766 | 121 | 0.572597 |
794402ad6492a51c0113a437713d34d947e05b2c | 246 | py | Python | src/tensor/op/geometric/combine/stack/horizontal.py | jedhsu/tensor | 3b2fe21029fa7c50b034190e77d79d1a94ea5e8f | [
"Apache-2.0"
] | null | null | null | src/tensor/op/geometric/combine/stack/horizontal.py | jedhsu/tensor | 3b2fe21029fa7c50b034190e77d79d1a94ea5e8f | [
"Apache-2.0"
] | null | null | null | src/tensor/op/geometric/combine/stack/horizontal.py | jedhsu/tensor | 3b2fe21029fa7c50b034190e77d79d1a94ea5e8f | [
"Apache-2.0"
] | null | null | null | """
*Horizontal Stack*
"""
from dataclasses import dataclass
import jax.numpy as jnp
from ._operator import StackOperator
__all__ = ["HorizontalStack"]
@dataclass
class HorizontalStack(
StackOperator,
):
operator = jnp.hstack
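# Minimal sketch of the wrapped primitive (how StackOperator exposes it is not
# shown in this file, so only jnp.hstack itself is demonstrated here):
#   import jax.numpy as jnp
#   jnp.hstack([jnp.zeros((2, 1)), jnp.ones((2, 1))]).shape  # -> (2, 2)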
| 11.714286 | 36 | 0.719512 |
79440360c20c72e28e3d685d89cbd83e3ec28359 | 3,084 | py | Python | PYTHON/11.py | guikingma/project_euler | e4deffc63f39a304e449c12ee1fa1233c6f70e91 | [
"WTFPL"
] | 1 | 2015-11-06T07:04:22.000Z | 2015-11-06T07:04:22.000Z | PYTHON/11.py | guikingma/project_euler | e4deffc63f39a304e449c12ee1fa1233c6f70e91 | [
"WTFPL"
] | null | null | null | PYTHON/11.py | guikingma/project_euler | e4deffc63f39a304e449c12ee1fa1233c6f70e91 | [
"WTFPL"
] | null | null | null | '''
11:
In the 20x20 grid below, four numbers along a diagonal line have been marked in red.
The product of these numbers is 26 * 63 * 78 * 14 = 1788696.
What is the greatest product of four adjacent numbers in the same direction (up, down, left, right, or diagonally) in the 20x20 grid?
'''
a = [""] * 20
# 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
a[0] = "08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08".split(" ")
a[1] = "49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00".split(" ")
a[2] = "81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65".split(" ")
a[3] = "52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91".split(" ")
a[4] = "22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80".split(" ")
a[5] = "24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50".split(" ")
a[6] = "32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70".split(" ")
a[7] = "67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21".split(" ")
a[8] = "24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72".split(" ")
a[9] = "21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95".split(" ")
a[10] ="78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92".split(" ")
a[11] ="16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57".split(" ")
a[12] ="86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58".split(" ")
a[13] ="19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40".split(" ")
a[14] ="04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66".split(" ")
a[15] ="88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69".split(" ")
a[16] ="04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36".split(" ")
a[17] ="20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16".split(" ")
a[18] ="20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54".split(" ")
a[19] ="01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48".split(" ")
def pri_diagonal():
ret = 0
for i in range(0,17):
for j in range(0,17):
resp = int(a[i][j])*int(a[i+1][j+1])*int(a[i+2][j+2])*int(a[i+3][j+3])
#print "(%s, %s) = %s - mult = %s" %(i, j, a[i][j], resp)
if resp > ret:
ret = resp
return ret
def sec_diagonal():
ret = 0
for i in range(0,17):
for j in range(3,20):
resp = int(a[i][j])*int(a[i+1][j-1])*int(a[i+2][j-2])*int(a[i+3][j-3])
#print "(%s, %s) = %s - mult = %s" %(i, j, a[i][j], resp)
if resp > ret:
ret = resp
return ret
def left_right():
ret = 0
for i in range(0,20):
for j in range(0,17):
resp = int(a[i][j])*int(a[i][j+1])*int(a[i][j+2])*int(a[i][j+3])
#print "(%s, %s) = %s - mult = %s" %(i, j, a[i][j], resp)
if resp > ret:
ret = resp
return ret
def up_down():
ret = 0
for i in range(0,17):
for j in range(0,20):
resp = int(a[i][j])*int(a[i+1][j])*int(a[i+2][j])*int(a[i+3][j])
#print "(%s, %s) = %s - mult = %s" %(i, j, a[i][j], resp)
if resp > ret:
ret = resp
return ret
def execute():
	print(max(pri_diagonal(), sec_diagonal(), left_right(), up_down()))
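# Expected result for this grid (Project Euler problem 11): 70600674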
execute() | 40.578947 | 133 | 0.571336 |
794405b82081e98a9a20255c237f162966b23b6f | 2,830 | py | Python | start.py | Seamooo/nn-chess | 38c3e54ab34af54d650240d35d0372689ff104bf | [
"MIT"
] | null | null | null | start.py | Seamooo/nn-chess | 38c3e54ab34af54d650240d35d0372689ff104bf | [
"MIT"
] | null | null | null | start.py | Seamooo/nn-chess | 38c3e54ab34af54d650240d35d0372689ff104bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from flask import Flask, url_for, send_from_directory, request, jsonify
from chess import Board
import random
app = Flask(__name__, static_url_path='', static_folder='public')
@app.route('/', methods=['GET'])
def get_index():
return app.send_static_file('index.html')
@app.route('/img/<path:path>')
def send_img(path):
return send_from_directory('img', path)
def score(state):
return random.random()
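# score() above is a random stub; a slightly less arbitrary material-count
# scorer one could swap in looks like this (illustrative sketch, not used below):
# def material_score(state):
# 	values = {'p': 1, 'n': 3, 'b': 3, 'r': 5, 'q': 9}
# 	total = sum(values[c.lower()] * (1 if c.isupper() else -1)
# 		for c in state.split()[0] if c.lower() in values)
# 	return 0.5 + total / 80.0  # rough squash into [0, 1]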
# not really mcts at the moment
# closer to randomised minimax
def dfs(state, num_searches, isplayer):
comp = max if isplayer else min
board = Board(state)
if(board.is_checkmate()):
return 0.0 if isplayer else 1.0
if(board.is_stalemate()):
return 0.5
if(board.can_claim_draw()):
return 0.5
rv = 0.0 if isplayer else 1.0
moves = list(board.legal_moves)
# if number of nodes to search exceeds the search limit provided
# score all nodes
if(len(moves) > num_searches):
for move in moves:
board.push(move)
rv = comp(rv,score(board.fen()))
board.pop()
return rv
search_move = [False for _ in range(len(moves))]
search_count = 0
i = 0
while i < len(moves):
inc = min(60, len(moves)-i)
		# gives each of these `inc` nodes an independent 50% chance of being searched
		# TODO make an adjustable probability parameter
		tp = random.randrange(1<<inc)
		for j in range(inc):
			if (tp >> j) & 1:
				search_move[i+j] = True
				search_count += 1
		i += inc
score_count = len(moves) - search_count
nodes_per_search = 0
if(search_count != 0):
nodes_per_search = (num_searches - score_count) // search_count
vals = []
for i in range(len(moves)):
move = moves[i]
if search_move[i]:
board.push(move)
rv = comp(rv,dfs(board.fen(), nodes_per_search, not isplayer))
board.pop()
else:
board.push(move)
			rv = comp(rv, score(board.fen()))
board.pop()
return rv
def get_move(state):
#assume both players play the best moves
#states where you play, take the maximum calculated as rv
#states where opponent plays, take the min calculated as rv
#limit number of searches artificially for now
#choose better numbers later
print('started search over state', state)
search_limit = 1000
board = Board(state)
mx = 0
rv = None
vals = []
for move in board.legal_moves:
board.push(move)
val = dfs(board.fen(), search_limit, False)
if(val > mx):
rv = move
mx = val
board.pop()
vals.append((str(move), val))
print(vals)
print('found move', rv)
return rv
@app.route('/ai', methods=['POST'])
def ai_move():
data = request.json
if 'state' not in data:
return 'bad request', 400
try:
move = get_move(data['state'])
except Exception as e:
print(e)
return 'bad request', 400
rv = str(move)
return jsonify({
'from':rv[:2],
'to':rv[2:4],
		'promote':'' if len(rv) < 5 else rv[4]
})
#server running on port 4444
if __name__ == '__main__':
app.run(port=4444)
| 24.396552 | 71 | 0.681272 |
7944075947846574d9101e0589c934cb6d7f7051 | 2,073 | py | Python | fucking-awesome.py | Correia-jpv/fu-awe-readme | 39ec3912859e6840642ab4b627055fa2ab1c5ca3 | [
"MIT"
] | null | null | null | fucking-awesome.py | Correia-jpv/fu-awe-readme | 39ec3912859e6840642ab4b627055fa2ab1c5ca3 | [
"MIT"
] | null | null | null | fucking-awesome.py | Correia-jpv/fu-awe-readme | 39ec3912859e6840642ab4b627055fa2ab1c5ca3 | [
"MIT"
] | null | null | null | import os
import re
import time
import requests
from requests.adapters import HTTPAdapter
from dotenv import load_dotenv
load_dotenv()
PAT = os.getenv("PAT")
ORIGIN_REPO = os.getenv("ORIGIN_REPO")
originRepoURL = f'https://raw.githubusercontent.com/correia-jpv/fucking-{ORIGIN_REPO}/main/readme.md'
# Session
HEADERS = {
"Authorization": 'token ' + PAT
}
session = requests.session()
session.mount("https://", HTTPAdapter())
session.headers.update(HEADERS)
md = session.get(originRepoURL)
if int(md.headers['Content-Length']) <= 35:
originRepoURL = re.sub('readme', 'README', originRepoURL)
md = session.get(originRepoURL)
md = md.content.decode("utf-8")
externalLinks = "(?:[^\!]|^)\[([^\[\]]+)\]\((?!https://github.com/|#)([^()]+)\)"
internalGithubRepos = "(?:[^\!]|^)\[([^\[\]]+)\]\((?=https://github.com/)([^()]+)\)"
def grabStats(repo):
head, sep, tail = repo.group(2).partition('/tree/')
repoUrl = re.sub('https://github.com', 'https://api.github.com/repos', head)
r = session.get(repoUrl)
while (r.status_code == 403):
print('waiting')
for second in range(0, 600):
time.sleep(1)
r = session.get(repoUrl)
data = r.json()
repoStars = str(data['stargazers_count'] if 'stargazers_count' in data else '?')
repoForks = str(data['forks'] if 'forks' in data else '?')
for n in range(6-len(repoStars)):
repoStars = ' ' + repoStars
for n in range(6-len(repoForks)):
repoForks = ' ' + repoForks
repoStars = '<b><code>' + repoStars + '⭐</code></b>'
repoForks = '<b><code>' + repoForks + '🍴</code></b>'
return f' {repoStars} {repoForks} [' + repo.group(1) + '](' + repo.group(2) + ')'
# Rewrite external links and annotate GitHub repo links with star/fork counts
md = re.sub(externalLinks, r""" 🌎 [\1](\2)""", md)
md = re.sub(internalGithubRepos, grabStats, md)
# Write the processed readme to file
filename = f'./results/readme-{ORIGIN_REPO}.md'
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w+", encoding='utf-8') as f:
f.write(md) | 30.485294 | 101 | 0.623734 |
794409152085b270bec6b796b2e2a58d2e295bef | 3,341 | py | Python | rdr_service/cloud_utils/gcp_cloud_tasks.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 39 | 2017-10-13T19:16:27.000Z | 2021-09-24T16:58:21.000Z | rdr_service/cloud_utils/gcp_cloud_tasks.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 312 | 2017-09-08T15:42:13.000Z | 2022-03-23T18:21:40.000Z | rdr_service/cloud_utils/gcp_cloud_tasks.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 19 | 2017-09-15T13:58:00.000Z | 2022-02-07T18:33:20.000Z | from datetime import datetime, timedelta, date
import json
import logging
from time import sleep
from google.api_core.exceptions import InternalServerError, GoogleAPICallError
from google.cloud import tasks_v2
from google.protobuf import timestamp_pb2
from rdr_service.config import GAE_PROJECT
from rdr_service.services.flask import TASK_PREFIX
class GCPCloudTask(object):
"""
Use the GCP Cloud Tasks API to run a task later.
"""
# Create a client.
_client = None
def execute(self, endpoint: str, payload: (dict, list)=None, in_seconds: int = 0, project_id: str = GAE_PROJECT,
location: str = 'us-central1', queue: str = 'default', quiet=False):
"""
Make GCP Cloud Task API request to run task later.
:param endpoint: Flask API endpoint to call.
:param payload: dict containing data to send to task.
:param in_seconds: delay before starting task in seconds, default to run immediately.
:param project_id: target project id.
:param location: target location.
:param queue: target cloud task queue.
:param quiet: suppress logging.
"""
if not project_id or project_id == 'localhost':
raise ValueError('Invalid GCP project id')
if not self._client:
self._client = tasks_v2.CloudTasksClient()
if not endpoint:
raise ValueError('endpoint value must be provided.')
if payload and not isinstance(payload, dict):
raise TypeError('payload must be a dict object.')
from rdr_service.resource.main import app
if endpoint not in app.url_map._rules_by_endpoint:
raise ValueError('endpoint is not registered in app.')
res = app.url_map._rules_by_endpoint[endpoint][0]
if not res.rule.startswith(TASK_PREFIX):
raise ValueError('endpoint is not configured using the task prefix.')
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
return obj.__repr__()
if payload:
payload = json.dumps(payload, default=json_serial).encode()
# Construct the fully qualified queue name.
parent = self._client.queue_path(project_id, location, queue)
task = {
"app_engine_http_request": {
"http_method": "POST",
"relative_uri": res.rule
}
}
if payload:
task['app_engine_http_request']['body'] = payload
if in_seconds:
run_ts = datetime.utcnow() + timedelta(seconds=in_seconds)
timestamp = timestamp_pb2.Timestamp()
timestamp.FromDatetime(run_ts)
task['schedule_time'] = timestamp
# Use the client to build and send the task.
retry = 5
while retry:
retry -= 1
try:
response = self._client.create_task(parent=parent, task=task)
if not quiet:
logging.info('Created task {0}'.format(response.name))
return
except (InternalServerError, GoogleAPICallError):
sleep(0.25)
logging.error('Create Cloud Task Failed.')
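# Usage sketch (the endpoint name and payload are illustrative; a real endpoint
# must be registered under TASK_PREFIX in rdr_service.resource.main.app):
#   GCPCloudTask().execute('rebuild_participants_task', payload={'ids': [1, 2]}, in_seconds=60)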
| 36.714286 | 116 | 0.624963 |
7944092618b4f0c08ccd4276d263571e05b58a04 | 1,722 | py | Python | Code Implementations/rail_fence_cipher.py | GalacticGlum/CryptographyResearchPaper | b538ba91fcee47995b2bf102affa9425badafc0c | [
"Unlicense"
] | null | null | null | Code Implementations/rail_fence_cipher.py | GalacticGlum/CryptographyResearchPaper | b538ba91fcee47995b2bf102affa9425badafc0c | [
"Unlicense"
] | null | null | null | Code Implementations/rail_fence_cipher.py | GalacticGlum/CryptographyResearchPaper | b538ba91fcee47995b2bf102affa9425badafc0c | [
"Unlicense"
] | null | null | null | def encrypt(plain_text, rails):
cipher_text = str()
cycle = max((rails - 1) * 2, 1) # 1 is special case for 1 rail
for rail in range(rails):
ptr = rail
character_distance = cycle - 2 * rail
# Both the bottom and top rails have a (same) character distance of the cycle.
if rail == rails - 1:
character_distance = cycle
# While we have *something* to write
while ptr < len(plain_text):
cipher_text += plain_text[ptr]
ptr += character_distance
# If this is not the top or bottom rail, alternate between two distance patterns
# (one for going up a cycle and one for going down a cycle).
if rail != 0 and rail != rails - 1:
character_distance = cycle - character_distance
return cipher_text
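# Worked example (3 rails, cycle = (3 - 1) * 2 = 4):
#   encrypt("WEAREDISCOVERED", 3) == "WECRERDSOEEAIVD"
#   rail 0 reads indices 0, 4, 8, 12      -> "WECR"
#   rail 1 reads indices 1, 3, 5, ..., 13 -> "ERDSOEE"
#   rail 2 reads indices 2, 6, 10, 14     -> "AIVD"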
def decrypt(cipher_text, rails):
plain_text = [''] * len(cipher_text)
cipher_index = 0
cycle = max((rails - 1) * 2, 1) # 1 is special case for 1 rail
for rail in range(rails):
ptr = rail
character_distance = cycle - 2 * rail
if rail == rails - 1:
character_distance = cycle
while ptr < len(plain_text):
plain_text[ptr] = cipher_text[cipher_index]
cipher_index += 1
ptr += character_distance
if rail != 0 and rail != rails - 1:
character_distance = cycle - character_distance
return ''.join(plain_text)
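# Round trip of the worked example above: decrypt("WECRERDSOEEAIVD", 3) == "WEAREDISCOVERED"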
rails = int(input('How many rails should I encrypt with?\n'))
cipher_text = encrypt(input('What should I encrypt?\n'), rails)
print('\nEncrypted: ' + cipher_text)
print('Decrypted: ' + decrypt(cipher_text, rails)) | 33.764706 | 93 | 0.592334 |
7944099f339e6bd586e07429710afcfec84afdb9 | 368 | py | Python | test/dummy.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | test/dummy.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | test/dummy.py | uliana291/the-zoo | a15a4162c39553abe91224f4feff5d3b66f9413e | [
"MIT"
] | null | null | null | repo_files = {
"readme.md": "Hello world!",
"requirements.txt": "django==2.3.4",
"package.json": '{"dependencies": {"webpack": "~0.0.0-rc14"}}',
}
def check_found(context):
yield context.Result("check:found", True, {"answer": 42})
def check_passing(context):
yield context.Result("check:passing", False)
CHECKS = [check_found, check_passing]
| 21.647059 | 67 | 0.646739 |
794409bd586aaf4690914dd1a7f0b1eef4a35ead | 3,202 | py | Python | lta/log_format.py | jnbellinger/lta | b7cb3c65e0f167e56abb67f8283083aafd700e42 | [
"MIT"
] | 1 | 2019-07-30T16:03:26.000Z | 2019-07-30T16:03:26.000Z | lta/log_format.py | jnbellinger/lta | b7cb3c65e0f167e56abb67f8283083aafd700e42 | [
"MIT"
] | 80 | 2019-01-10T21:46:43.000Z | 2022-03-24T22:40:54.000Z | lta/log_format.py | jnbellinger/lta | b7cb3c65e0f167e56abb67f8283083aafd700e42 | [
"MIT"
] | 1 | 2018-12-10T21:13:11.000Z | 2018-12-10T21:13:11.000Z | # log_format.py
"""
Module to provide support for structured logging.
Example code to enable structured logging might look as follows:
structured_formatter = StructuredFormatter(
component_type='Transmogrifier',
component_name='transmog-node-1',
ndjson=True)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(structured_formatter)
root_logger = logging.getLogger(None)
root_logger.setLevel(logging.NOTSET)
root_logger.addHandler(stream_handler)
"""
from datetime import datetime
import json
from logging import Formatter, LogRecord
import traceback
from typing import Dict, List, Optional, Union
class StructuredFormatter(Formatter):
"""
StructuredFormatter is a Formatter for structured logging.
LogRecord objects are formatted as JSON. Under the default configuration,
a StructuredFormatter will render these as NDJSON (http://ndjson.org/)
"""
def __init__(self,
component_name: Optional[str] = None,
component_type: Optional[str] = None,
ndjson: bool = True) -> None:
"""
Create a StructuredFormatter object.
component_name: Optional[str] - The name of the software component
component_type: Optional[str] - The type of the software component
ndjson: bool - Output as NDJSON; defaults to True.
"""
self.component_name = component_name
self.component_type = component_type
self.indent = None if ndjson else 4
self.separators = (',', ':') if ndjson else (', ', ': ')
super(StructuredFormatter, self).__init__()
def format(self, record: LogRecord) -> str:
"""
Format a LogRecord object as a log message.
record - LogRecord object to be formatted
Returns a log message as a str. In the default configuration, this
is JSON on a single line (NDJSON). If the StructuredFormatter is
created with ndjson=False, then each log message will become a
pretty-printed block of JSON.
"""
# ensure our log message has an ISO 8601 timestamp
data: Dict[str, Union[str, List[str]]] = {
'timestamp': datetime.utcnow().isoformat(),
'message': record.getMessage()
}
# copy everything provided to us in the LogRecord object
record_dict = vars(record)
for key in record_dict:
data[key] = record_dict[key]
# if the LogRecord contained an Exception Tuple, format it
if "exc_info" in record_dict:
if record_dict["exc_info"]:
exc_type, exc_value, exc_tb = record_dict["exc_info"]
data["exc_info"] = traceback.format_exception(exc_type, exc_value, exc_tb)
# add the component type if it was configured
if self.component_type:
data['component_type'] = self.component_type
# add the component name if it was configured
if self.component_name:
data['component_name'] = self.component_name
# format the data dictionary as JSON
return json.dumps(data, indent=self.indent, separators=self.separators)
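# Example of a single NDJSON record this formatter emits (field order and the
# extra LogRecord attributes vary by Python version and logging configuration):
#   {"timestamp":"2021-01-01T00:00:00","message":"hello","levelname":"INFO",
#    "component_type":"Transmogrifier","component_name":"transmog-node-1",...}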
| 39.04878 | 90 | 0.661462 |
794409cfaa3d05a1f2885a2e3a6b8370e6a0a916 | 3,992 | py | Python | task_set/train_inner_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | task_set/train_inner_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | task_set/train_inner_test.py | deepneuralmachine/google-research | d2ce2cf0f5c004f8d78bfeddf6e88e88f4840231 | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Tests for task_set.train_inner."""
import json
import os
import tempfile
import numpy as np
from task_set import datasets
from task_set import train_inner
from task_set.tasks import base
import tensorflow.compat.v1 as tf
class DummyTask(base.BaseTask):
"""Dummy task used for tests."""
def call_split(self, params, split, with_metrics=False):
r = tf.random_normal(shape=[], dtype=tf.float32)
offset = {
datasets.Split.TRAIN: 1.0,
datasets.Split.VALID_INNER: 2.0,
datasets.Split.VALID_OUTER: 3.0,
datasets.Split.TEST: 4.0,
}
loss = offset[split] + r
if with_metrics:
return loss, {"metric": -1 * loss}
else:
return loss
def get_batch(self, split):
return None
def current_params(self):
return {}
def gradients(self, loss):
return {}
def initial_params(self):
return {}
def get_variables(self):
return []
class TrainInnerTest(tf.test.TestCase):
def test_compute_averaged_loss(self):
task = DummyTask()
params = task.initial_params()
losses, _ = train_inner.compute_averaged_loss(
task, params, num_batches=100, with_metrics=False)
with self.test_session() as sess:
all_np_losses = []
for _ in range(10):
all_np_losses.append(sess.run(losses))
tr, vai, vao, te = zip(*all_np_losses)
    # We are averaging over 100 batches with 10 replicated evaluations.
# This means the std. error of the mean should be 1/sqrt(1000) or 0.03.
# We use a threshold of 0.15, corresponding to a 5-sigma test.
self.assertNear(np.mean(tr), 1.0, 0.15)
self.assertNear(np.mean(vai), 2.0, 0.15)
self.assertNear(np.mean(vao), 3.0, 0.15)
self.assertNear(np.mean(te), 4.0, 0.15)
# ensure that each sample is also different.
self.assertLess(1e-5, np.var(tr), 0.5)
self.assertLess(1e-5, np.var(vai), 0.5)
self.assertLess(1e-5, np.var(vao), 0.5)
self.assertLess(1e-5, np.var(te), 0.5)
losses, metrics = train_inner.compute_averaged_loss(
task, params, num_batches=100, with_metrics=True)
tr_metrics, vai_metrics, vao_metrics, te_metrics = metrics
with self.test_session() as sess:
# this std. error is 1/sqrt(100), or 0.1. 5 std out is 0.5
self.assertNear(sess.run(tr_metrics["metric"]), -1.0, 0.5)
self.assertNear(sess.run(vai_metrics["metric"]), -2.0, 0.5)
self.assertNear(sess.run(vao_metrics["metric"]), -3.0, 0.5)
self.assertNear(sess.run(te_metrics["metric"]), -4.0, 0.5)
def test_train(self):
tmp_dir = tempfile.mkdtemp()
# TODO(lmetz) when toy tasks are done, switch this away from an mlp.
train_inner.train(
tmp_dir,
task_name="mlp_family_seed12",
optimizer_name="adam8p_wide_grid_seed21",
training_steps=10,
eval_every_n=5)
with tf.gfile.Open(os.path.join(tmp_dir, "result")) as f:
result_data = json.loads(f.read())
self.assertEqual(len(result_data), 3)
# 4 losses logged out per timestep
self.assertEqual(len(result_data["5"]), 4)
with tf.gfile.Open(os.path.join(tmp_dir, "time_per_step")) as f:
time_per_step_data = json.loads(f.read())
self.assertIn("mean_last_half", time_per_step_data)
self.assertIn("mean_time", time_per_step_data)
self.assertIn("median_time", time_per_step_data)
if __name__ == "__main__":
tf.test.main()
| 30.945736 | 75 | 0.682365 |
794409ea24b35d8312a8db6931f0c60cd31f57c6 | 10,865 | py | Python | extras/client/alpaca_discovery.py | jamessynge/TinyAlpacaServer | 12cffca15542be418cd42322e2150ba432eeb945 | [
"MIT"
] | 1 | 2021-10-07T04:35:05.000Z | 2021-10-07T04:35:05.000Z | extras/client/alpaca_discovery.py | jamessynge/TinyAlpacaServer | 12cffca15542be418cd42322e2150ba432eeb945 | [
"MIT"
] | null | null | null | extras/client/alpaca_discovery.py | jamessynge/TinyAlpacaServer | 12cffca15542be418cd42322e2150ba432eeb945 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Provides the ability to find IPv4 ASCOM Alpaca servers on the local networks.
Uses the netifaces library to find the broadcast IPs that can be used for
sending the UDP discovery message.
Some functions accept **kwargs (i.e. unspecified keyword arguments) so that they
can be passed arguments that originated from argparse, which may well include
arguments not of interest to the methods here.
Note that I've chosen to omit support for IPv6 because I don't need it for
testing Tiny Alpaca Server.
TODO(jamessynge): Figure out if I can NOT use netifaces to get the network
interface information.
"""
import argparse
import dataclasses
import json
import queue
import socket
import sys
import threading
import time
from typing import Callable, Dict, Generator, List, Optional
import install_advice
try:
import netifaces # pylint: disable=g-import-not-at-top
except ImportError:
install_advice.install_advice('netifaces')
# build_cleaner doesn't find imports that aren't at the top level, so we repeat
# the import here.
import netifaces # pylint: disable=g-import-not-at-top,g-bad-import-order
# Daniel VanNoord selected UDP port 32227 for the Alpaca Discovery protocol, but
# that port is not officially assigned to the protocol, so it may change some
# day. An Alpaca Server can confirm that the packet is intended for it by
# looking for the string 'alpacadiscovery1' as the entire body of the UDP packet
# it receives at that port, and an Alpaca Discovery client can confirm that a
# response is from an Alpaca Server by checking that the response body can be
# parsed as JSON and has a property 'alpacaport' whose value is an integer that
# can be a TCP port number (e.g. 1 to 65535).
ALPACA_DISCOVERY_PORT = 32227
DISCOVERY_REQUEST_BODY = 'alpacadiscovery1'
ALPACA_SERVER_PORT_PROPERTY = 'alpacaport'
DEFAULT_DISCOVERY_SECS = 2.0
@dataclasses.dataclass
class DiscoverySource:
"""Addresses from and to which to send ASCOM Alpaca discovery packets."""
interface_name: str
src_address: str
dst_address: str
dst_is_broadcast: bool
def get_name(self) -> str:
return f'{self.dst_address} via {self.interface_name}'
def create_bound_udp_socket(self) -> socket.socket:
"""Create UDP port for sending to dst_address."""
# --------------------------------
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if self.dst_is_broadcast:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
try:
sock.bind((self.src_address, 0)) # listen to any on a temporary port
except:
print(f'failure to bind {self}', file=sys.stderr, flush=True)
sock.close()
raise
# sock.setblocking(0)
return sock
def send_discovery_packet(self, sock: socket.socket, verbose=False):
"""Writes an Alpaca Discovery UDP Packet to sock."""
if verbose:
action = 'Broadcasting' if self.dst_is_broadcast else 'Sending'
# Appending a \n explicitly because multiple threads will output strings,
# and I've found that the default end value is output as a separate
# operation that can come after the "Collected..." string from another
# thread.
print(
f'{action} from {self.src_address} to {self.dst_address}\n',
flush=True,
end='')
sock.sendto(
DISCOVERY_REQUEST_BODY.encode(encoding='ascii'),
(self.dst_address, ALPACA_DISCOVERY_PORT))
@dataclasses.dataclass
class DiscoveryResponse:
"""Represents an Alpaca Discovery Protocol Response from an Alpaca Server."""
source: DiscoverySource
data_bytes: bytes
recvfrom_addr: str
recvfrom_port: int # The discovery port.
def get_alpaca_server_addr(self) -> str:
return f'{self.recvfrom_addr}:{self.get_port()}'
def get_port(self) -> int:
data_str = str(self.data_bytes, 'ascii')
jsondata = json.loads(data_str)
return int(jsondata[ALPACA_SERVER_PORT_PROPERTY])
def generate_addresses(address_family) -> Generator[Dict[str, str], None, None]:
"""docstring."""
# netifaces.interfaces returns a list of interface names.
for name in netifaces.interfaces():
# netifaces.ifaddresses(interface_name) returns a dictionary mapping an
# address family (e.g. netifaces.AF_INET for IPv4) to a list of address
# groups (dictionaries) provided by that interface. Note that a single
# interface may have multiple addresses, even for a single address family.
for addr_family, addr_groups in netifaces.ifaddresses(name).items():
if address_family == addr_family:
for address_group in addr_groups:
if 'addr' not in address_group:
            # Note that I'm assuming an address group without an 'addr' entry can be skipped.
continue
result = dict(interface_name=name)
result.update(address_group)
yield result
def generate_discovery_sources() -> Generator[DiscoverySource, None, None]:
"""docstring."""
for address_group in generate_addresses(netifaces.AF_INET):
if 'broadcast' in address_group:
yield DiscoverySource(
interface_name=address_group['interface_name'],
src_address=address_group['addr'],
dst_address=address_group['broadcast'],
dst_is_broadcast=True)
elif 'peer' in address_group:
yield DiscoverySource(
interface_name=address_group['interface_name'],
src_address=address_group['addr'],
dst_address=address_group['peer'],
dst_is_broadcast=False)
def receiver(sock: socket.socket, max_discovery_secs: float,
response_queue: queue.Queue) -> None:
sock.settimeout(max_discovery_secs)
while True:
try:
data_bytes, addr = sock.recvfrom(1024)
except socket.timeout:
return
# For AF_INET sockets, addr is a pair, (host, port).
response_queue.put((data_bytes, addr[0], addr[1]))
class Discoverer(object):
"""Performs Alpaca Discovery for a single DiscoverySource."""
def __init__(self, source: DiscoverySource):
self.source = source
def perform_discovery(self,
response_queue: queue.Queue,
max_discovery_secs: float = DEFAULT_DISCOVERY_SECS,
verbose=False) -> threading.Thread:
"""Returns a thread which writes DiscoveryResponses to response_queue."""
def worker():
for r in self.generate_responses(
max_discovery_secs=max_discovery_secs, verbose=verbose):
response_queue.put(r)
t = threading.Thread(target=worker, name=self.source.get_name())
t.start()
return t
def generate_responses(
self,
max_discovery_secs: float = DEFAULT_DISCOVERY_SECS,
verbose=False) -> Generator[DiscoveryResponse, None, None]:
"""Yields DiscoveryResponses after sending from the source address."""
sock = self.source.create_bound_udp_socket()
q = queue.Queue(maxsize=1000)
t = threading.Thread(target=receiver, args=(sock, max_discovery_secs, q))
t.start()
iota = max(0.001, min(0.05, max_discovery_secs / 100.0))
time.sleep(iota)
self.source.send_discovery_packet(sock, verbose=verbose)
count = 0
while t.is_alive():
try:
data_bytes, addr, port = q.get(block=True, timeout=iota)
except queue.Empty:
continue
yield DiscoveryResponse(
source=self.source,
data_bytes=data_bytes,
recvfrom_addr=addr,
recvfrom_port=port)
count += 1
t.join()
while not q.empty():
data_bytes, addr, port = q.get(block=False)
yield DiscoveryResponse(
source=self.source,
data_bytes=data_bytes,
recvfrom_addr=addr,
recvfrom_port=port)
if verbose:
# Appending a \n explicitly because multiple threads will output strings,
# and I've found that the default end value is output as a separate
# operation that can come after the "Collected..." string from another
# thread.
print(
f'Collected {count} responses for source {self.source.get_name()}\n',
flush=True,
end='')
def perform_discovery(discovery_response_handler: Callable[[DiscoveryResponse],
None],
sources: Optional[List[DiscoverySource]] = None,
max_discovery_secs: float = DEFAULT_DISCOVERY_SECS,
verbose=False,
**kwargs) -> None:
"""Sends a discovery packet from all sources, passes results to handler."""
del kwargs # Unused.
if sources is None:
if verbose:
print('Finding network interfaces to use for discovery.')
sources = list(generate_discovery_sources())
discoverers = [Discoverer(source) for source in sources]
q = queue.Queue(maxsize=1000)
threads = []
for d in discoverers:
threads.append(
d.perform_discovery(
response_queue=q,
max_discovery_secs=max_discovery_secs,
verbose=verbose))
start_secs = time.time()
while threads:
if not threads[0].is_alive():
t = threads.pop(0)
if verbose:
print('Thread %r is done' % t.name, flush=True)
t.join()
while not q.empty():
dr = q.get(block=False)
discovery_response_handler(dr)
time.sleep(0.01)
end_secs = time.time()
if verbose:
elapsed_secs = end_secs - start_secs
print(f'perform_discovery: elapsed_secs={elapsed_secs}')
if elapsed_secs < max_discovery_secs:
print(
f'perform_discovery: ended {max_discovery_secs - elapsed_secs}s early'
)
def find_first_server(**kwargs) -> Optional[DiscoveryResponse]:
"""Return the first server to respond, else None."""
result = None
def discovery_response_handler(dr: DiscoveryResponse) -> None:
nonlocal result
if result is not None:
return
result = dr
perform_discovery(discovery_response_handler, **kwargs)
return result
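# Usage sketch (blocks for up to max_discovery_secs while waiting for replies):
#   first = find_first_server(max_discovery_secs=1.0)
#   if first:
#     print('Found an Alpaca server at', first.get_alpaca_server_addr())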
def make_discovery_parser() -> argparse.ArgumentParser:
"""Returns a parser for discovery operations."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
'--max_discovery_secs',
metavar='SECONDS',
type=float,
default=DEFAULT_DISCOVERY_SECS,
help='Time to wait (seconds) for Alpaca Discovery responses.')
parser.add_argument(
'--verbose',
'-v',
action='store_true',
help='Print more messages about what the program is doing.')
return parser
def main():
parser = argparse.ArgumentParser(
description='Find Alpaca servers.', parents=[make_discovery_parser()])
cli_args = parser.parse_args()
cli_kwargs = vars(cli_args)
def discovery_response_handler(dr: DiscoveryResponse) -> None:
print('Found a server at', dr.get_alpaca_server_addr())
perform_discovery(discovery_response_handler, **cli_kwargs)
if __name__ == '__main__':
main()
| 34.823718 | 80 | 0.688909 |
79440b0b8dec8b1db765e967a0ce138b1b4939b9 | 11,263 | py | Python | sha256/core/ubitarray_32.py | greysonDEV/SHA-256 | 512de78a7261728316f79de5be2ca8b6eddebbb6 | [
"MIT"
] | null | null | null | sha256/core/ubitarray_32.py | greysonDEV/SHA-256 | 512de78a7261728316f79de5be2ca8b6eddebbb6 | [
"MIT"
] | null | null | null | sha256/core/ubitarray_32.py | greysonDEV/SHA-256 | 512de78a7261728316f79de5be2ca8b6eddebbb6 | [
"MIT"
] | null | null | null | # ============================================================================ #
# Author: Greyson Murray ([email protected])
#
# Description: This file contains UBitArray32 and other auxiliary methods that
# deal with various bit computations.
#
# LICENSE: MIT
# ============================================================================ #
from __future__ import annotations
from typing import List, Union
from collections.abc import Iterator
from functools import reduce
from sha256.core.bitops import binary, prepad, twos, add
from sha256.const.tables import HEX
class UBitArray32:
"""
(Unsigned 32-Bit Array)
This class is responsible for handling much of the bit computations in the
hash function. Being an unsigned 32-bit array, the maximum integer value
that this array should hold is (2**32)-1, or 4294967295. Negative numbers
will also be converted into their unsigned counterpart.
"""
def __init__(self, bits: List[int]) -> None:
"""
Args:
bits: (List[int]) The list of bits to create a UBitArray32 object
from.
"""
if not bits:
raise ValueError(f"cannot create empty {self.__class__.__name__}")
elif len(bits) > 32:
            # only keep the last (least significant) 32 bits
bits = bits[-32:]
elif len(bits) < 32:
# pad with zeros to 32 bits
bits = [0]*(32-len(bits)) + bits
self.bits = bits
@classmethod
def fromint(cls, n: int) -> UBitArray32:
"""
Creates a UBitArray32 object from an integer. The integer is converted
into its binary representation and fed back into the
UBitArray32.__init__ method to create the object.
Args:
n: (int) The integer to create a UBitArray32 object from.
Returns:
(bits): (UBitArray32) The resulting UBitArray32 object.
Raises:
(ValueError) The largest number that can be represented in 32-bit is
(2**32)-1, or 4294967295. For this reason a ValueError is raised
if and when a number larger than (2**32)-1 is passed to this
method.
"""
if n > (2**32)-1:
raise ValueError(f"maximum value of (2**32)-1, or 4294967295, exceeded")
elif n < 0:
bits = binary(n*-1)
bits = twos(prepad(bits))
else:
bits = binary(n)
bits = prepad(bits)
return cls(bits)
def toint(self) -> int:
"""
Converts the UBitArray32 object back into its (unsigned) integer
representation.
Returns:
(int) The integer representation of the UBitArray32 object.
"""
# create backwards range for powers
pows = range(len(self)-1,-1,-1)
n = 0
for bit,_pow in zip(self, pows):
n += bit*(2**_pow)
return n
def tohex(self) -> str:
"""
Converts the UBitArray32 object into its hexadecimal representation.
Returns:
(str) The hexadecimal representation of the UBitArray32
object.
"""
chunks = [self[i:i+4] for i in range(0, len(self), 4)]
result = ""
for chunk in chunks:
hex_value = HEX[chunk.toint()]
result += hex_value
return result
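    # e.g. UBitArray32.fromint(255).tohex() == "000000ff" (the last two 4-bit
    # chunks are 0b1111; this assumes the HEX table maps 15 to "f").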
def rshift(self, n: int) -> UBitArray32:
"""
Computes a new UBitArray32 object resulting from shifting the bits
'n' positions rightwards.
Args:
n: (int) The amount to shift by.
Returns:
(UBitArray32) The resulting UBitArray32 object.
"""
if n >= len(self):
return self.__class__([0]*len(self))
else:
# chop last n bits, prepend n '0's
result = [0]*n + self.bits[:-n]
return self.__class__(result)
def rotr(self, n: int) -> UBitArray32:
"""
Computes a new UBitArray32 object resulting from rotating the bits
'n' positions rightwards.
Args:
n: (int) The amount to rotate by.
Returns:
(UBitArray32) The resulting UBitArray32 object.
"""
n %= len(self)
# chop last n bits, prepend them
result = self.bits[-n:] + self.bits[:-n]
return self.__class__(result)
def __xor__(self, other: UBitArray32) -> UBitArray32:
"""
Computes the bitwise XOR operation with another instance of
UBitArray32.
Args:
other: (UBitArray32) The other instance to compute XOR with.
Returns:
(UBitArray32) The resulting UBitArray32 object.
"""
result = []
for x,y in zip(self.bits, other.bits):
result.append((x + y) % 2)
return self.__class__(result)
def __add__(self, other: UBitArray32) -> UBitArray32:
"""
Computes the bitwise addition operation with another instance of
UBitArray32.
Args:
other: (UBitArray32) The other instance to add to.
Returns:
(UBitArray32) The resulting UBitArray32 object.
"""
return self.__class__(add(self, other))
def __eq__(self, other: UBitArray32) -> bool:
"""
        Checks whether this instance is equal to another instance of
        UBitArray32 by comparing their bits.
Args:
other: (UBitArray32) The other instance to compare to.
Returns:
(bool) True if both 'self' and 'other' have equal bits; otherwise
False.
"""
return self.bits == other.bits
def __getitem__(self, i) -> Union[int, UBitArray32]:
"""
Gets the bit at the 'i'th index and supports slicing.
Args:
i: (int or slice) The index or slice to retrieve.
Returns:
            (int) The bit at the 'i'th position (either 0 or 1)
(UBitArray32) The set of bits retrieved from the slicing operation.
"""
if isinstance(i, int):
return self.bits[i]
elif isinstance(i, slice):
if not self.bits[i]:
raise ValueError(f"slice results in empty {self.__class__.__name__}")
return self.__class__(self.bits[i])
def __iter__(self) -> Iterator[int]:
"""
Supports iteration over instances of UBitArray32.
Returns:
(list_iterator): An iterator of the set of bits contained by the
instance of UBitArray32.
"""
return iter(self.bits)
def __len__(self) -> int:
"""
Returns:
(int) The length of bits.
"""
return len(self.bits)
def __str__(self) -> str:
"""
Returns:
(str) A simple string representation, for example:
'00011011110000111000001000110000'
"""
return "".join([str(bit) for bit in self.bits])
def __repr__(self) -> str:
"""
Returns:
(str) A string representation for debugging, for example:
'UBitArray32[0 0 0 1 1 0 1 1 1 1 0 0 0 0 1 1 1 0 0 0 0 0 1 0 0 0 1 1 0 0 0 0]'
"""
cls_name = self.__class__.__name__
bit_repr = " ".join([str(bit) for bit in self.bits])
return f"{cls_name}[{bit_repr}]"
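# --- Illustrative usage sketch (editor's addition, not part of the original
# module): a quick doctest-style tour of UBitArray32. It assumes the HEX table
# maps 0..15 to the usual '0'..'f' characters and that the binary/prepad/twos
# helpers behave as their names suggest.
#
#     >>> x = UBitArray32.fromint(1000)
#     >>> x.toint()
#     1000
#     >>> x.tohex()
#     '000003e8'
#     >>> UBitArray32.fromint(-1).toint()   # negatives wrap to their unsigned value
#     4294967295
#     >>> x.rotr(4).rotr(28) == x           # rotating by a full 32 bits is a no-op
#     True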
def xor(*bitarrays: UBitArray32) -> UBitArray32:
"""
Computes the bitwise XOR of the input sets of bits.
Args:
*bit_arrays: (UBitArray32) The sets of bits to XOR.
Returns:
(UBitArray32) The result of the XOR operation.
"""
result = reduce(UBitArray32.__xor__, bitarrays)
return result
def ch(a: UBitArray32, b: UBitArray32, c: UBitArray32) -> UBitArray32:
"""
Takes the 'choice' of two sets of bits ('b' and 'c') based off of the bits
in 'a'.
Example:
a: 00010110
b: 11001010
c: 01111000
-> 01101010
Args:
a: (UBitArray32) The model set of bits.
b: (UBitArray32) The bits chosen if the model bit is 1.
c: (UBitArray32) The bits chosen if the model bit is 0.
Returns:
(UBitArray32) The result of the choice operation.
"""
result = []
for model, y, z in zip(a, b, c):
result.append(y if model else z)
return UBitArray32(result)
def maj(a: UBitArray32, b: UBitArray32, c: UBitArray32) -> UBitArray32:
"""
    Takes the 'majority' of three sets of bits. The sets are iterated over
    position by position, and the resulting bit at each position is the bit
    value that occurs most often at that position across the three sets.
Example:
a: 00010110
b: 11001010
c: 01111000
-> 01011010
Args:
a: (UBitArray32)
b: (UBitArray32)
c: (UBitArray32)
Returns:
(UBitArray32) The result of the majority operation.
"""
result = []
for bits in zip(a,b,c):
result.append(max(bits, key=bits.count))
return UBitArray32(result)
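# --- Illustrative check (editor's addition, not part of the original module):
# the docstring examples above can be reproduced directly. The 8-bit patterns
# are left-padded to 32 bits by UBitArray32, so only the last 8 characters of
# the string form are compared.
#
#     >>> a = UBitArray32([0, 0, 0, 1, 0, 1, 1, 0])
#     >>> b = UBitArray32([1, 1, 0, 0, 1, 0, 1, 0])
#     >>> c = UBitArray32([0, 1, 1, 1, 1, 0, 0, 0])
#     >>> str(ch(a, b, c))[-8:]
#     '01101010'
#     >>> str(maj(a, b, c))[-8:]
#     '01011010'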
def lsig0(bitarray: UBitArray32) -> UBitArray32:
"""
(lowercase sigma 0)
    Computes the XOR of three sets of bits, which result from rotating the
    input set rightwards by 7 and then by 18 bits, and right-shifting it by
    3 bits.
Args:
bitarray: (UBitArray32) The set of bits to operate on.
Returns:
(UBitArray32) The XOR of the three sets that result from
        rotating/shifting the input set.
"""
a = bitarray.rotr(7)
b = bitarray.rotr(18)
c = bitarray.rshift(3)
return xor(a,b,c)
def lsig1(bitarray: UBitArray32) -> UBitArray32:
"""
(lowercase sigma 1)
    Computes the XOR of three sets of bits, which result from rotating the
    input set rightwards by 17 and then by 19 bits, and right-shifting it by
    10 bits.
Args:
bitarray: (UBitArray32) The set of bits to operate on.
Returns:
(UBitArray32) The XOR of the three sets that result from
        rotating/shifting the input set.
"""
a = bitarray.rotr(17)
b = bitarray.rotr(19)
c = bitarray.rshift(10)
return xor(a,b,c)
def usig0(bitarray: UBitArray32) -> UBitArray32:
"""
(uppercase sigma 0)
Computes the XOR of three sets of bits which result from rotating the input
set rightwards by 2, then 13, and then 22.
Args:
bitarray: (UBitArray32) The set of bits to operate on.
Returns:
(UBitArray32) The XOR of the three sets that result from
        rotating the input set.
"""
a = bitarray.rotr(2)
b = bitarray.rotr(13)
c = bitarray.rotr(22)
return xor(a,b,c)
def usig1(bitarray: UBitArray32) -> UBitArray32:
"""
(uppercase sigma 1)
Computes the XOR of three sets of bits which result from rotating the input
set rightwards by 6, then 11, and then 25.
Args:
bitarray: (UBitArray32) The set of bits to operate on.
Returns:
(UBitArray32) The XOR of the three sets that result from
        rotating the input set.
"""
a = bitarray.rotr(6)
b = bitarray.rotr(11)
c = bitarray.rotr(25)
return xor(a,b,c)
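# --- Editor's note (not part of the original module): these four sigma
# functions, together with ch() and maj(), are the standard SHA-256 round
# primitives. As a rough sketch of how they are typically combined, the
# message-schedule expansion for words 16..63 looks like
#
#     w[t] = lsig1(w[t - 2]) + w[t - 7] + lsig0(w[t - 15]) + w[t - 16]
#
# with each w[t] a UBitArray32, while usig0/usig1, ch and maj feed the 64
# compression rounds. The actual hash driver lives elsewhere in this package
# and is not shown here.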
| 26.880668 | 94 | 0.568765 |
79440b906143141713b2cffaf7f77a336cff33cc | 18,791 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/voss_config.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/voss_config.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/network/plugins/modules/voss_config.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | ["MIT"] | null | null | null |
#!/usr/bin/python
# Copyright: (c) 2018, Extreme Networks Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: voss_config
author: "Lindsay Hill (@LindsayHill)"
short_description: Manage Extreme VOSS configuration sections
description:
- Extreme VOSS configurations use a simple flat text file syntax.
This module provides an implementation for working with EXOS
configuration lines in a deterministic way.
notes:
- Tested against VOSS 7.0.0
- Abbreviated commands are NOT idempotent, see
L(Network FAQ,../network/user_guide/faq.html#why-do-the-config-modules-always-return-changed-true-with-abbreviated-commands).
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The parent line that uniquely identifies the section the commands
should be checked against. If this argument is omitted, the commands
are checked against the set of top level or global commands. Note
that VOSS configurations only support one level of nested commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
choices: ['line', 'strict', 'exact', 'none']
default: line
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory or role root directory, if playbook is part of an
ansible role. If the directory does not exist, it is created.
type: bool
default: 'no'
running_config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(running_config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
aliases: ['config']
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(show running-config verbose).
type: bool
default: 'no'
save_when:
description:
- When changes are made to the device running-configuration, the
changes are not copied to non-volatile storage by default. Using
this argument will change that behavior. If the argument is set to
I(always), then the running-config will always be saved and the
I(modified) flag will always be set to True. If the argument is set
to I(modified), then the running-config will only be saved if it
has changed since the last save to startup-config. If the argument
is set to I(never), the running-config will never be saved.
If the argument is set to I(changed), then the running-config
will only be saved if the task has made a change.
default: never
choices: ['always', 'never', 'modified', 'changed']
diff_against:
description:
- When using the C(ansible-playbook --diff) command line argument
the module can generate diffs against different sources.
      - When this option is configured as I(startup), the module will return
the diff of the running-config against the startup-config.
- When this option is configured as I(intended), the module will
return the diff of the running-config against the configuration
provided in the C(intended_config) argument.
- When this option is configured as I(running), the module will
return the before and after diff of the running-config with respect
to any changes made to the device configuration.
choices: ['running', 'startup', 'intended']
diff_ignore_lines:
description:
- Use this argument to specify one or more lines that should be
ignored during the diff. This is used for lines in the configuration
that are automatically updated by the system. This argument takes
a list of regular expressions or exact line matches.
intended_config:
description:
- The C(intended_config) provides the master configuration that
the node should conform to and is used to check the final
running-config against. This argument will not modify any settings
on the remote device and is strictly used to check the compliance
of the current device's configuration against. When specifying this
argument, the task should also modify the C(diff_against) value and
set it to I(intended).
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
- The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
'''
EXAMPLES = """
- name: Configure system name
community.network.voss_config:
lines: prompt "{{ inventory_hostname }}"
- name: Configure interface settings
community.network.voss_config:
lines:
- name "ServerA"
backup: yes
parents: interface GigabitEthernet 1/1
- name: Check the running-config against master config
community.network.voss_config:
diff_against: intended
intended_config: "{{ lookup('file', 'master.cfg') }}"
- name: Check the startup-config against the running-config
community.network.voss_config:
diff_against: startup
diff_ignore_lines:
- qos queue-profile .*
- name: Save running to startup when modified
community.network.voss_config:
save_when: modified
- name: Configurable backup path
community.network.voss_config:
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['prompt "VSP200"']
commands:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['interface GigabitEthernet 1/1', 'name "ServerA"', 'exit']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/vsp200_config.2018-08-21@15:00:21
"""
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import ConnectionError
from ansible_collections.community.network.plugins.module_utils.network.voss.voss import run_commands, get_config
from ansible_collections.community.network.plugins.module_utils.network.voss.voss import get_defaults_flag, get_connection
from ansible_collections.community.network.plugins.module_utils.network.voss.voss import get_sublevel_config, VossNetworkConfig
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import dumps
def get_candidate_config(module):
candidate = VossNetworkConfig(indent=0)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
commands = module.params['lines'][0]
if (isinstance(commands, dict)) and (isinstance(commands['command'], list)):
candidate.add(commands['command'], parents=parents)
elif (isinstance(commands, dict)) and (isinstance(commands['command'], str)):
candidate.add([commands['command']], parents=parents)
else:
candidate.add(module.params['lines'], parents=parents)
return candidate
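# Editor's illustration (not part of the original module): with
# lines=['name "ServerA"'] and parents=['interface GigabitEthernet 1/1'],
# get_candidate_config() builds a VossNetworkConfig whose rendered command
# block roughly corresponds to the `commands`/`updates` sample documented in
# RETURN above:
#
#     interface GigabitEthernet 1/1
#     name "ServerA"
#     exit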
def get_running_config(module, current_config=None, flags=None):
running = module.params['running_config']
if not running:
if not module.params['defaults'] and current_config:
running = current_config
else:
running = get_config(module, flags=flags)
return running
def save_config(module, result):
result['changed'] = True
if not module.check_mode:
run_commands(module, 'save config\r')
else:
module.warn('Skipping command `save config` '
'due to check_mode. Configuration not copied to '
'non-volatile storage')
def main():
""" main entry point for module execution
"""
backup_spec = dict(
filename=dict(),
dir_path=dict(type='path')
)
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
running_config=dict(aliases=['config']),
intended_config=dict(),
defaults=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
backup_options=dict(type='dict', options=backup_spec),
save_when=dict(choices=['always', 'never', 'modified', 'changed'], default='never'),
diff_against=dict(choices=['startup', 'intended', 'running']),
diff_ignore_lines=dict(type='list'),
)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('diff_against', 'intended', ['intended_config'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
result = {'changed': False}
parents = module.params['parents'] or list()
match = module.params['match']
replace = module.params['replace']
warnings = list()
result['warnings'] = warnings
diff_ignore_lines = module.params['diff_ignore_lines']
config = None
contents = None
flags = get_defaults_flag(module) if module.params['defaults'] else []
connection = get_connection(module)
if module.params['backup'] or (module._diff and module.params['diff_against'] == 'running'):
contents = get_config(module, flags=flags)
config = VossNetworkConfig(indent=0, contents=contents)
if module.params['backup']:
result['__backup__'] = contents
if any((module.params['lines'], module.params['src'])):
candidate = get_candidate_config(module)
if match != 'none':
config = get_running_config(module)
config = VossNetworkConfig(contents=config, indent=0)
if parents:
config = get_sublevel_config(config, module)
configobjs = candidate.difference(config, match=match, replace=replace)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands')
commands = commands.split('\n')
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
result['updates'] = commands
# send the configuration commands to the device and merge
# them with the current running config
if not module.check_mode:
if commands:
try:
connection.edit_config(candidate=commands)
except ConnectionError as exc:
module.fail_json(msg=to_text(commands, errors='surrogate_then_replace'))
result['changed'] = True
running_config = module.params['running_config']
startup = None
if module.params['save_when'] == 'always':
save_config(module, result)
elif module.params['save_when'] == 'modified':
match = module.params['match']
replace = module.params['replace']
try:
# Note we need to re-retrieve running config, not use cached version
running = connection.get_config(source='running')
startup = connection.get_config(source='startup')
response = connection.get_diff(candidate=startup, running=running, diff_match=match,
diff_ignore_lines=diff_ignore_lines, path=None,
diff_replace=replace)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
config_diff = response['config_diff']
if config_diff:
save_config(module, result)
elif module.params['save_when'] == 'changed' and result['changed']:
save_config(module, result)
if module._diff:
if not running_config:
try:
# Note we need to re-retrieve running config, not use cached version
contents = connection.get_config(source='running')
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
else:
contents = running_config
# recreate the object in order to process diff_ignore_lines
running_config = VossNetworkConfig(indent=0, contents=contents,
ignore_lines=diff_ignore_lines)
if module.params['diff_against'] == 'running':
if module.check_mode:
module.warn("unable to perform diff against running-config due to check mode")
contents = None
else:
contents = config.config_text
elif module.params['diff_against'] == 'startup':
if not startup:
try:
contents = connection.get_config(source='startup')
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
else:
contents = startup
elif module.params['diff_against'] == 'intended':
contents = module.params['intended_config']
if contents is not None:
base_config = VossNetworkConfig(indent=0, contents=contents,
ignore_lines=diff_ignore_lines)
if running_config.sha1 != base_config.sha1:
if module.params['diff_against'] == 'intended':
before = running_config
after = base_config
elif module.params['diff_against'] in ('startup', 'running'):
before = base_config
after = running_config
result.update({
'changed': True,
'diff': {'before': str(before), 'after': str(after)}
})
module.exit_json(**result)
if __name__ == '__main__':
main()
| 41.573009 | 129 | 0.65707 |
79440c1684e8cd9185325689d9bdc263f9f75510 | 5,304 | py | Python | jtop/core/tegrastats.py | alx/jetson_stats | a55d5d67bf268bb47fe75a9a0b15598e99bfe9ea | ["MIT"] | null | null | null | jtop/core/tegrastats.py | alx/jetson_stats | a55d5d67bf268bb47fe75a9a0b15598e99bfe9ea | ["MIT"] | null | null | null | jtop/core/tegrastats.py | alx/jetson_stats | a55d5d67bf268bb47fe75a9a0b15598e99bfe9ea | ["MIT"] | null | null | null |
# -*- coding: UTF-8 -*-
# Copyright (C) 2019, Raffaello Bonghi <[email protected]>
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Logging
import logging
# Launch command
import subprocess as sp
# Threading
from threading import Thread
# Tegrastats parser
from .tegra_parse import VALS, MTS, RAM, SWAP, IRAM, CPUS, TEMPS, VOLTS
# Create module logger
logger = logging.getLogger(__name__)
class Tegrastats(Thread):
"""
- Subprocess read:
https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python/4896288#4896288
- Property
https://www.programiz.com/python-programming/property
"""
class TegrastatsException(Exception):
pass
def __init__(self, path, interval):
Thread.__init__(self)
# Set interval tegrastats
self.interval = interval
# Initialize jetson stats
self._stats = {}
# Start process tegrastats
self.path = path
# Define Tegrastats process
self.p = None
# Initialize observer
self._observers = set()
def run(self):
try:
while self.p.poll() is None:
out = self.p.stdout
if out is not None:
# Read line process output
line = out.readline()
# Decode line in UTF-8
tegrastats_data = line.decode("utf-8")
# Decode and store
self._stats = self._decode(tegrastats_data)
# Notifiy all observers
for observer in self._observers:
observer.update(self._stats)
except SystemExit:
logger.error("System exit", exc_info=True)
except AttributeError:
logger.error("Attribute error", exc_info=True)
@property
def stats(self):
# Return dictionary parsed
return self._stats
def attach(self, observer):
self._observers.add(observer)
def detach(self, observer):
self._observers.discard(observer)
def open(self, callback=None):
try:
            # Launch the tegrastats subprocess, or raise an exception
self.p = sp.Popen([self.path, '--interval', str(self.interval)], stdout=sp.PIPE)
            # Run as a daemon thread and start reading the output
self.daemon = True
self.start()
            # Wait until the first sample has been parsed
while not self._stats:
pass
            # If a callback is given, attach it as an observer so it receives each parsed update
if callback is not None:
self.attach(callback)
return True
except OSError:
logger.error("Tegrastats not in list!")
raise Tegrastats.TegrastatsException("Tegrastats is not available on this hardware")
return False
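    # --- Editor's illustration (not part of the original file): the callback
    # passed to open() is attached via attach() and must therefore be an
    # observer object exposing an update(stats) method, not a bare function:
    #
    #     class Printer:
    #         def update(self, stats):
    #             print(stats.get('CPU'))
    #
    #     tegra = Tegrastats('/usr/bin/tegrastats', 500)  # path/interval are assumptions
    #     tegra.open(callback=Printer())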
def close(self):
if self.p is not None:
self.p.kill()
return True
else:
return False
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _decode(self, text):
# Find and parse all single values
stats = VALS(text)
# Parse if exist MTS
mts = MTS(text)
if mts:
stats['MTS'] = mts
# Parse RAM
stats['RAM'] = RAM(text)
# If exists parse SWAP
swap = SWAP(text)
if swap:
stats['SWAP'] = swap
# If exists parse IRAM
iram = IRAM(text)
if iram:
stats['IRAM'] = iram
# Parse CPU status
stats['CPU'] = CPUS(text)
# Parse temperatures
stats['TEMP'] = TEMPS(text)
# Parse voltages
stats['VOLT'] = VOLTS(text)
return stats
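# --- Illustrative usage sketch (editor's addition, not part of the original
# file); the tegrastats path and the 500 ms interval are assumptions for the
# example, not values required by this module:
#
#     with Tegrastats('/usr/bin/tegrastats', interval=500) as tegra:
#         print(tegra.stats.get('RAM'))    # parsed RAM section
#         print(tegra.stats.get('CPU'))    # per-core CPU status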
# EOF
| 34.219355 | 115 | 0.625 |
79440c302f15194637e767cbd4116cade302422a | 140,196 | py | Python | tests/test_temporal.py | jasonb5/xcdat | 4a35d6a6131fe3fec22593f54a9e48b640ceac4f | ["Apache-2.0"] | null | null | null | tests/test_temporal.py | jasonb5/xcdat | 4a35d6a6131fe3fec22593f54a9e48b640ceac4f | ["Apache-2.0"] | null | null | null | tests/test_temporal.py | jasonb5/xcdat | 4a35d6a6131fe3fec22593f54a9e48b640ceac4f | ["Apache-2.0"] | null | null | null |
from datetime import datetime
import cftime
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.tests import requires_dask
from tests.fixtures import generate_dataset
from xcdat.temporal import TemporalAccessor
class TestTemporalAccessor:
def test__init__(self):
ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
obj = TemporalAccessor(ds)
assert obj._dataset.identical(ds)
def test_decorator(self):
ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
obj = ds.temporal
assert obj._dataset.identical(ds)
class TestAverage:
def test_weighted_annual_avg(self):
ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
result = ds.temporal.average("ts", "year")
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
["2000-01-01T00:00:00.000000000", "2001-01-01T00:00:00.000000000"],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((2, 4, 4)),
coords={
"lat": ds.lat,
"lon": ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "year",
"weighted": "True",
"center_times": "False",
},
)
# For some reason, there is a very small floating point difference
# between both for ts so we have to use floating point comparison
xr.testing.assert_allclose(result, expected)
assert result.ts.attrs == expected.ts.attrs
class TestClimatology:
def test_weighted_seasonal_climatology_with_DJF(self):
ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
result = ds.temporal.climatology(
"ts",
"season",
season_config={"dec_mode": "DJF", "drop_incomplete_djf": True},
)
expected = ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((4, 4, 4)),
coords={"lat": ds.lat, "lon": ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "season",
"weighted": "True",
"center_times": "False",
"dec_mode": "DJF",
"drop_incomplete_djf": "True",
},
)
assert result.identical(expected)
class TestDepartures:
# TODO: Update TestDepartures tests to use other numbers rather than 1's for
# better test reliability and accuracy. This may require subsetting.
@pytest.fixture(autouse=True)
def setup(self):
self.ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
self.seasons = ["JJA", "MAM", "SON", "DJF"]
def test_weighted_seasonal_departures_with_DJF(self):
# Create a post-climatology dataset.
ds = self.ds.copy()
# Drop incomplete DJF seasons
ds = ds.isel(time=slice(2, -1))
# Compare result of the method against the expected.
result = ds.temporal.departures(
"ts",
"season",
season_config={"dec_mode": "DJF", "drop_incomplete_djf": True},
)
expected = ds.copy()
expected["ts"] = xr.DataArray(
data=np.zeros((12, 4, 4)),
coords={
"lat": ds.lat,
"lon": ds.lon,
"time": ds.time,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "departures",
"freq": "season",
"weighted": "True",
"center_times": "False",
"dec_mode": "DJF",
"drop_incomplete_djf": "True",
},
)
assert result.identical(expected)
def test_unweighted_seasonal_departures_with_DJF(self):
ds = self.ds.copy()
# Drop incomplete DJF seasons
ds = ds.isel(time=slice(2, -1))
# Compare result of the method against the expected.
result = ds.temporal.departures(
"ts",
"season",
weighted=False,
season_config={"dec_mode": "DJF", "drop_incomplete_djf": True},
)
expected = ds.copy()
expected["ts"] = xr.DataArray(
data=np.zeros((12, 4, 4)),
coords={
"lat": ds.lat,
"lon": ds.lon,
"time": ds.time,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "departures",
"freq": "season",
"weighted": "False",
"center_times": "False",
"dec_mode": "DJF",
"drop_incomplete_djf": "True",
},
)
assert result.identical(expected)
def test_unweighted_seasonal_departures_with_JFD(self):
ds = self.ds.copy()
# Compare result of the method against the expected.
result = ds.temporal.departures(
"ts",
"season",
weighted=False,
season_config={"dec_mode": "JFD"},
)
expected = ds.copy()
expected["ts"] = xr.DataArray(
data=np.zeros((15, 4, 4)),
coords={
"lat": ds.lat,
"lon": ds.lon,
"time": ds.time,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "departures",
"freq": "season",
"weighted": "False",
"center_times": "False",
"dec_mode": "JFD",
},
)
assert result.identical(expected)
class TestCenterTimes:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_raises_error_if_time_dimension_does_not_exist_in_dataset(self):
ds = self.ds.copy()
ds = ds.drop_dims("time")
with pytest.raises(KeyError):
ds.temporal.center_times(ds)
def test_gets_time_as_the_midpoint_between_time_bounds(self):
ds = self.ds.copy()
# Make the time coordinates uncentered.
uncentered_time = np.array(
[
"2000-01-31T12:00:00.000000000",
"2000-02-29T12:00:00.000000000",
"2000-03-31T12:00:00.000000000",
"2000-04-30T00:00:00.000000000",
"2000-05-31T12:00:00.000000000",
"2000-06-30T00:00:00.000000000",
"2000-07-31T12:00:00.000000000",
"2000-08-31T12:00:00.000000000",
"2000-09-30T00:00:00.000000000",
"2000-10-16T12:00:00.000000000",
"2000-11-30T00:00:00.000000000",
"2000-12-31T12:00:00.000000000",
"2001-01-31T12:00:00.000000000",
"2001-02-28T00:00:00.000000000",
"2001-12-31T12:00:00.000000000",
],
dtype="datetime64[ns]",
)
ds.time.data[:] = uncentered_time
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
# Compare result of the method against the expected.
expected = ds.copy()
expected_time_data = np.array(
[
"2000-01-16T12:00:00.000000000",
"2000-02-15T12:00:00.000000000",
"2000-03-16T12:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T12:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T12:00:00.000000000",
"2000-08-16T12:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T12:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T12:00:00.000000000",
"2001-01-16T12:00:00.000000000",
"2001-02-15T00:00:00.000000000",
"2001-12-16T12:00:00.000000000",
],
dtype="datetime64[ns]",
)
expected = expected.assign_coords(
{
"time": xr.DataArray(
name="time",
data=expected_time_data,
coords={"time": expected_time_data},
dims="time",
attrs={
"long_name": "time",
"standard_name": "time",
"axis": "T",
"bounds": "time_bnds",
},
)
}
)
# Update time bounds with centered time coordinates.
time_bounds = ds.time_bnds.copy()
time_bounds["time"] = expected.time
expected["time_bnds"] = time_bounds
result = ds.temporal.center_times(ds)
assert result.identical(expected)
class TestTemporalAvg:
# TODO: Update TestTimeSeries tests to use other numbers rather than 1's
# for better test reliability and accuracy. This may require subsetting.
class TestTimeSeries:
@pytest.fixture(autouse=True)
def setup(self):
            # FIXME: Update this test so that it is accurate, rather than using
            # 1's for averages. This may involve subsetting.
self.ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
def test_weighted_annual_avg(self):
ds = self.ds.copy()
result = ds.temporal._temporal_avg("ts", "time_series", "year")
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
["2000-01-01T00:00:00.000000000", "2001-01-01T00:00:00.000000000"],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((2, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "year",
"weighted": "True",
"center_times": "False",
},
)
# For some reason, there is a very small floating point difference
# between both for ts so we have to use floating point comparison
xr.testing.assert_allclose(result, expected)
assert result.ts.attrs == expected.ts.attrs
@requires_dask
def test_weighted_annual_avg_with_chunking(self):
ds = self.ds.copy().chunk({"time": 2})
result = ds.temporal._temporal_avg("ts", "time_series", "year")
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
["2000-01-01T00:00:00.000000000", "2001-01-01T00:00:00.000000000"],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((2, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "year",
"weighted": "True",
"center_times": "False",
},
)
# For some reason, there is a very small floating point difference
# between both for ts so we have to use floating point comparison
xr.testing.assert_allclose(result, expected)
assert result.ts.attrs == expected.ts.attrs
def test_weighted_annual_avg_with_centering_time(self):
ds = self.ds.copy()
result = ds.temporal._temporal_avg(
"ts", "time_series", "year", center_times=True
)
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
["2000-01-01T00:00:00.000000000", "2001-01-01T00:00:00.000000000"],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((2, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "year",
"weighted": "True",
"center_times": "True",
},
)
# For some reason, there is a floating point difference between both
# for ts so we have to use floating point comparison
xr.testing.assert_allclose(result, expected)
assert result.ts.attrs == expected.ts.attrs
def test_weighted_seasonal_avg_with_DJF(self):
ds = self.ds.copy()
result = ds.temporal._temporal_avg(
"ts",
"time_series",
"season",
season_config={"dec_mode": "DJF", "drop_incomplete_djf": True},
)
expected = ds.copy()
# Drop the incomplete DJF seasons
expected = expected.isel(time=slice(2, -1))
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
[
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((4, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "season",
"weighted": "True",
"center_times": "False",
"dec_mode": "DJF",
"drop_incomplete_djf": "True",
},
)
assert result.identical(expected)
def test_weighted_seasonal_avg_with_DJF_without_dropping_incomplete_seasons(
self,
):
ds = self.ds.copy()
result = ds.temporal._temporal_avg(
"ts",
"time_series",
"season",
season_config={"dec_mode": "DJF", "drop_incomplete_djf": False},
)
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2002-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2002-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((6, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "season",
"weighted": "True",
"center_times": "False",
"dec_mode": "DJF",
"drop_incomplete_djf": "False",
},
)
assert result.identical(expected)
def test_weighted_seasonal_avg_with_JFD(self):
            ds = self.ds.copy()
result = ds.temporal._temporal_avg(
"ts",
"time_series",
"season",
season_config={"dec_mode": "JFD"},
)
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((5, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "season",
"weighted": "True",
"center_times": "False",
"dec_mode": "JFD",
},
)
assert result.identical(expected)
def test_weighted_custom_season_avg(self):
ds = self.ds.copy()
custom_seasons = [
["Jan", "Feb", "Mar"],
["Apr", "May", "Jun"],
["Jul", "Aug", "Sep"],
["Oct", "Nov", "Dec"],
]
result = ds.temporal._temporal_avg(
"ts",
"time_series",
"season",
season_config={"custom_seasons": custom_seasons},
)
expected = ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
"2000-02-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
"2001-02-01T00:00:00.000000000",
"2001-11-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-02-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
"2001-02-01T00:00:00.000000000",
"2001-11-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((6, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "season",
"custom_seasons": [
"JanFebMar",
"AprMayJun",
"JulAugSep",
"OctNovDec",
],
"weighted": "True",
"center_times": "False",
},
)
assert result.identical(expected)
def test_weighted_monthly_avg(self):
ds = self.ds.copy()
result = ds.temporal._temporal_avg("ts", "time_series", "month")
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-02-01T00:00:00.000000000",
"2000-03-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
"2000-06-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
"2000-09-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
"2000-12-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-02-01T00:00:00.000000000",
"2001-12-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-02-01T00:00:00.000000000",
"2000-03-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
"2000-06-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
"2000-09-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
"2000-12-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-02-01T00:00:00.000000000",
"2001-12-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((15, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "month",
"weighted": "True",
"center_times": "False",
},
)
assert result.identical(expected)
def test_weighted_daily_avg(self):
ds = self.ds.copy()
result = ds.temporal._temporal_avg("ts", "time_series", "day")
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
[
"2000-01-16T00:00:00.000000000",
"2000-02-15T00:00:00.000000000",
"2000-03-16T00:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T00:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T00:00:00.000000000",
"2000-08-16T00:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T00:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T00:00:00.000000000",
"2001-01-16T00:00:00.000000000",
"2001-02-15T00:00:00.000000000",
"2001-12-16T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-16T00:00:00.000000000",
"2000-02-15T00:00:00.000000000",
"2000-03-16T00:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T00:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T00:00:00.000000000",
"2000-08-16T00:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T00:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T00:00:00.000000000",
"2001-01-16T00:00:00.000000000",
"2001-02-15T00:00:00.000000000",
"2001-12-16T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((15, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "day",
"weighted": "True",
"center_times": "False",
},
)
assert result.identical(expected)
def test_weighted_hourly_avg(self):
ds = self.ds.copy()
ds.coords["time"].attrs["bounds"] = "time_bnds"
result = ds.temporal._temporal_avg("ts", "time_series", "hour")
expected = ds.copy()
expected["ts_original"] = ds.ts.copy()
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((15, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"year_month_day_hour": pd.MultiIndex.from_tuples(
[
(2000, 1, 16, 12),
(2000, 2, 15, 12),
(2000, 3, 16, 12),
(2000, 4, 16, 0),
(2000, 5, 16, 12),
(2000, 6, 16, 0),
(2000, 7, 16, 12),
(2000, 8, 16, 12),
(2000, 9, 16, 0),
(2000, 10, 16, 12),
(2000, 11, 16, 0),
(2000, 12, 16, 12),
(2001, 1, 16, 12),
(2001, 2, 15, 0),
(2001, 12, 16, 12),
]
),
},
dims=["year_month_day_hour", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "hour",
"weighted": "True",
"center_times": "False",
},
)
expected = ds.copy()
expected = expected.drop_dims("time")
time_new = xr.DataArray(
data=np.array(
[
"2000-01-16T12:00:00.000000000",
"2000-02-15T12:00:00.000000000",
"2000-03-16T12:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T12:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T12:00:00.000000000",
"2000-08-16T12:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T12:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T12:00:00.000000000",
"2001-01-16T12:00:00.000000000",
"2001-02-15T00:00:00.000000000",
"2001-12-16T12:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={
"time": np.array(
[
"2000-01-16T12:00:00.000000000",
"2000-02-15T12:00:00.000000000",
"2000-03-16T12:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T12:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T12:00:00.000000000",
"2000-08-16T12:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T12:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T12:00:00.000000000",
"2001-01-16T12:00:00.000000000",
"2001-02-15T00:00:00.000000000",
"2001-12-16T12:00:00.000000000",
],
dtype="datetime64[ns]",
),
},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((15, 4, 4)),
coords={
"lat": self.ds.lat,
"lon": self.ds.lon,
"time": time_new,
},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "time_series",
"freq": "hour",
"weighted": "True",
"center_times": "False",
},
)
assert result.identical(expected)
class TestClimatology:
# TODO: Update TestClimatology tests to use other numbers rather than 1's
# for better test reliability and accuracy. This may require subsetting.
@pytest.fixture(autouse=True)
def setup(self):
self.ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
def test_raises_error_without_time_dimension(self):
ds = self.ds.copy()
ds = ds.drop_dims("time")
with pytest.raises(KeyError):
ds.temporal._temporal_avg("climatology", "season", "ts")
def test_raises_error_with_incorrect_freq_arg(self):
with pytest.raises(ValueError):
self.ds.temporal._temporal_avg(
"ts",
"climatology",
"incorrect_freq",
)
def test_raises_error_with_incorrect_dec_mode_arg(self):
with pytest.raises(ValueError):
self.ds.temporal._temporal_avg(
"ts",
"climatology",
freq="season",
season_config={"dec_mode": "incorrect"},
)
def test_raises_error_if_data_var_does_not_exist_in_dataset(self):
with pytest.raises(KeyError):
self.ds.temporal._temporal_avg(
"nonexistent_var", "climatology", freq="season"
)
def test_weighted_seasonal_climatology_with_DJF(self):
ds = self.ds.copy()
result = ds.temporal._temporal_avg(
"ts",
"climatology",
"season",
season_config={"dec_mode": "DJF", "drop_incomplete_djf": True},
)
expected = ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((4, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "season",
"weighted": "True",
"center_times": "False",
"dec_mode": "DJF",
"drop_incomplete_djf": "True",
},
)
assert result.identical(expected)
@requires_dask
def test_chunked_weighted_seasonal_climatology_with_DJF(self):
ds = self.ds.copy().chunk({"time": 2})
result = ds.temporal._temporal_avg(
"ts",
"climatology",
"season",
season_config={"dec_mode": "DJF", "drop_incomplete_djf": True},
)
expected = ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((4, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "season",
"weighted": "True",
"center_times": "False",
"dec_mode": "DJF",
"drop_incomplete_djf": "True",
},
)
assert result.identical(expected)
def test_weighted_seasonal_climatology_with_JFD(self):
ds = self.ds.copy()
result = ds.temporal._temporal_avg(
"ts",
"climatology",
"season",
season_config={"dec_mode": "JFD"},
)
expected = ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((4, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "season",
"weighted": "True",
"center_times": "False",
"dec_mode": "JFD",
},
)
assert result.identical(expected)
def test_weighted_custom_seasonal_climatology(self):
# FIXME: Fix this test
ds = self.ds.copy()
custom_seasons = [
["Jan", "Feb", "Mar"],
["Apr", "May", "Jun"],
["Jul", "Aug", "Sep"],
["Oct", "Nov", "Dec"],
]
result = ds.temporal._temporal_avg(
"ts",
"climatology",
"season",
season_config={"custom_seasons": custom_seasons},
)
expected = ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 2, 1),
cftime.datetime(1, 5, 1),
cftime.datetime(1, 8, 1),
cftime.datetime(1, 11, 1),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 2, 1),
cftime.datetime(1, 5, 1),
cftime.datetime(1, 8, 1),
cftime.datetime(1, 11, 1),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((4, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "season",
"weighted": "True",
"center_times": "False",
"custom_seasons": [
"JanFebMar",
"AprMayJun",
"JulAugSep",
"OctNovDec",
],
},
)
assert result.identical(expected)
def test_weighted_monthly_climatology(self):
result = self.ds.temporal._temporal_avg("ts", "climatology", "month")
expected = self.ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 2, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 5, 1),
cftime.datetime(1, 6, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 8, 1),
cftime.datetime(1, 9, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 11, 1),
cftime.datetime(1, 12, 1),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 2, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 5, 1),
cftime.datetime(1, 6, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 8, 1),
cftime.datetime(1, 9, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 11, 1),
cftime.datetime(1, 12, 1),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((12, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "month",
"weighted": "True",
"center_times": "False",
},
)
assert result.identical(expected)
def test_unweighted_monthly_climatology(self):
result = self.ds.temporal._temporal_avg(
"ts", "climatology", "month", weighted=False
)
expected = self.ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 2, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 5, 1),
cftime.datetime(1, 6, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 8, 1),
cftime.datetime(1, 9, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 11, 1),
cftime.datetime(1, 12, 1),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 2, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 5, 1),
cftime.datetime(1, 6, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 8, 1),
cftime.datetime(1, 9, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 11, 1),
cftime.datetime(1, 12, 1),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((12, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "month",
"weighted": "False",
"center_times": "False",
},
)
assert result.identical(expected)
def test_weighted_daily_climatology(self):
result = self.ds.temporal._temporal_avg(
"ts", "climatology", "day", weighted=True
)
expected = self.ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 3, 16),
cftime.datetime(1, 4, 16),
cftime.datetime(1, 5, 16),
cftime.datetime(1, 6, 16),
cftime.datetime(1, 7, 16),
cftime.datetime(1, 8, 16),
cftime.datetime(1, 9, 16),
cftime.datetime(1, 10, 16),
cftime.datetime(1, 11, 16),
cftime.datetime(1, 12, 16),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 3, 16),
cftime.datetime(1, 4, 16),
cftime.datetime(1, 5, 16),
cftime.datetime(1, 6, 16),
cftime.datetime(1, 7, 16),
cftime.datetime(1, 8, 16),
cftime.datetime(1, 9, 16),
cftime.datetime(1, 10, 16),
cftime.datetime(1, 11, 16),
cftime.datetime(1, 12, 16),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((12, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "day",
"weighted": "True",
"center_times": "False",
},
)
assert result.identical(expected)
def test_unweighted_daily_climatology(self):
result = self.ds.temporal._temporal_avg(
"ts", "climatology", "day", weighted=False
)
expected = self.ds.copy()
expected = expected.drop_dims("time")
expected_time = xr.DataArray(
data=np.array(
[
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 3, 16),
cftime.datetime(1, 4, 16),
cftime.datetime(1, 5, 16),
cftime.datetime(1, 6, 16),
cftime.datetime(1, 7, 16),
cftime.datetime(1, 8, 16),
cftime.datetime(1, 9, 16),
cftime.datetime(1, 10, 16),
cftime.datetime(1, 11, 16),
cftime.datetime(1, 12, 16),
],
),
coords={
"time": np.array(
[
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 3, 16),
cftime.datetime(1, 4, 16),
cftime.datetime(1, 5, 16),
cftime.datetime(1, 6, 16),
cftime.datetime(1, 7, 16),
cftime.datetime(1, 8, 16),
cftime.datetime(1, 9, 16),
cftime.datetime(1, 10, 16),
cftime.datetime(1, 11, 16),
cftime.datetime(1, 12, 16),
],
),
},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
expected["ts"] = xr.DataArray(
name="ts",
data=np.ones((12, 4, 4)),
coords={"lat": self.ds.lat, "lon": self.ds.lon, "time": expected_time},
dims=["time", "lat", "lon"],
attrs={
"operation": "temporal_avg",
"mode": "climatology",
"freq": "day",
"weighted": "False",
"center_times": "False",
},
)
assert result.identical(expected)
class TestSetObjAttrs:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_raises_error_if_operation_is_not_supported(self):
with pytest.raises(ValueError):
self.ds.temporal._set_obj_attrs(
"unsupported",
freq="season",
weighted=True,
center_times=True,
season_config={
"dec_mode": "DJF",
"drop_incomplete_djf": False,
"custom_seasons": None,
},
)
def test_raises_error_if_freq_arg_is_not_supported_by_operation(self):
ds = self.ds.copy()
with pytest.raises(ValueError):
ds.temporal._set_obj_attrs(
"time_series",
freq="unsupported",
weighted=True,
center_times=True,
season_config={
"dec_mode": "DJF",
"drop_incomplete_djf": False,
"custom_seasons": None,
},
)
with pytest.raises(ValueError):
ds.temporal._set_obj_attrs(
"climatology",
freq="unsupported",
weighted=True,
center_times=True,
season_config={
"dec_mode": "DJF",
"drop_incomplete_djf": False,
"custom_seasons": None,
},
)
with pytest.raises(ValueError):
ds.temporal._set_obj_attrs(
"departures",
freq="unsupported",
weighted=True,
center_times=True,
season_config={
"dec_mode": "DJF",
"drop_incomplete_djf": False,
"custom_seasons": None,
},
)
def test_does_not_raise_error_if_freq_arg_is_supported_by_operation(self):
ds = self.ds.copy()
climatology_freqs = ["season", "month", "day"]
departure_freqs = ["season", "month", "day"]
time_series_freqs = ["year", "season", "month", "day", "hour"]
for freq in time_series_freqs:
ds.temporal._set_obj_attrs(
"time_series",
freq=freq,
weighted=True,
center_times=True,
season_config={
"dec_mode": "DJF",
"drop_incomplete_djf": False,
"custom_seasons": None,
},
)
for freq in climatology_freqs:
ds.temporal._set_obj_attrs(
"climatology",
freq=freq,
weighted=True,
center_times=True,
season_config={
"dec_mode": "DJF",
"drop_incomplete_djf": False,
"custom_seasons": None,
},
)
for freq in departure_freqs:
ds.temporal._set_obj_attrs(
"departures",
freq=freq,
weighted=True,
center_times=True,
season_config={
"dec_mode": "DJF",
"drop_incomplete_djf": False,
"custom_seasons": None,
},
)
def test_raises_error_if_season_config_key_is_not_supported(self):
with pytest.raises(KeyError):
self.ds.temporal._set_obj_attrs(
"climatology",
freq="season",
weighted=True,
center_times=True,
season_config={
"not_supported": "invalid",
},
)
def test_raises_error_if_december_mode_is_not_supported(self):
with pytest.raises(ValueError):
self.ds.temporal._set_obj_attrs(
"climatology",
freq="season",
weighted=True,
center_times=True,
season_config={
"dec_mode": "unsupported",
"drop_incomplete_djf": False,
"custom_seasons": None,
},
)
def test_sets_object_attributes(self):
ds = self.ds.copy()
ds.temporal._set_obj_attrs(
"climatology",
freq="season",
weighted=True,
center_times=True,
season_config={"dec_mode": "JFD"},
)
assert ds.temporal._mode == "climatology"
assert ds.temporal._freq == "season"
assert ds.temporal._center_times
assert ds.temporal._weighted
assert ds.temporal._season_config == {"dec_mode": "JFD"}
ds.temporal._set_obj_attrs(
"climatology",
freq="season",
weighted=True,
center_times=True,
season_config={
"custom_seasons": [
["Jan", "Feb", "Mar"],
["Apr", "May", "Jun"],
["Jul", "Aug", "Sep"],
["Oct", "Nov", "Dec"],
],
},
)
assert ds.temporal._season_config == {
"custom_seasons": {
"JanFebMar": ["Jan", "Feb", "Mar"],
"AprMayJun": ["Apr", "May", "Jun"],
"JulAugSep": ["Jul", "Aug", "Sep"],
"OctNovDec": ["Oct", "Nov", "Dec"],
}
}
class TestCustomSeasons:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
self.expected = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
def test_raises_error_if_month_str_not_supported(self):
# Incorrect str "J".
with pytest.raises(ValueError):
self.ds.temporal._form_seasons(
custom_seasons=[
["J", "Feb", "Mar"],
["Apr", "May", "Jun"],
["Jul", "Aug", "Sep"],
["Oct", "Nov", "Dec"],
]
)
# Incorrect str "January".
with pytest.raises(ValueError):
self.ds.temporal._form_seasons(
custom_seasons=[
["January", "Feb", "Mar"],
["Apr", "May", "Jun"],
["Jul", "Aug", "Sep"],
["Oct", "Nov", "Dec"],
]
)
def test_raises_error_if_missing_months(self):
with pytest.raises(ValueError):
# "Jan" is missing.
self.ds.temporal._form_seasons(
custom_seasons=[
["Feb", "Mar"],
["Apr", "May", "Jun"],
["Jul", "Aug", "Sep"],
["Oct", "Nov", "Dec"],
]
)
def test_raises_error_if_duplicate_months_were_found(self):
with pytest.raises(ValueError):
# "Jan" is duplicated.
self.ds.temporal._form_seasons(
custom_seasons=[
["Jan", "Jan", "Feb"],
["Apr", "May", "Jun"],
["Jul", "Aug", "Sep"],
["Oct", "Nov", "Dec"],
]
)
def test_does_not_raise_error(self):
result = self.ds.temporal._form_seasons(
custom_seasons=[
["Jan", "Feb", "Mar"],
["Apr", "May", "Jun"],
["Jul", "Aug", "Sep"],
["Oct", "Nov", "Dec"],
]
)
expected = {
"JanFebMar": ["Jan", "Feb", "Mar"],
"AprMayJun": ["Apr", "May", "Jun"],
"JulAugSep": ["Jul", "Aug", "Sep"],
"OctNovDec": ["Oct", "Nov", "Dec"],
}
assert result == expected
result = self.ds.temporal._form_seasons(
custom_seasons=[
["Jan", "Feb", "Mar", "Apr", "May", "Jun"],
["Jul", "Aug", "Sep", "Oct", "Nov", "Dec"],
]
)
expected = {
"JanFebMarAprMayJun": ["Jan", "Feb", "Mar", "Apr", "May", "Jun"],
"JulAugSepOctNovDec": ["Jul", "Aug", "Sep", "Oct", "Nov", "Dec"],
}
assert result == expected
result = self.ds.temporal._form_seasons(
custom_seasons=[
["Jan", "Feb", "Mar"],
["Apr", "May", "Jun", "Jul"],
["Aug", "Sep", "Oct", "Nov", "Dec"],
]
)
expected = {
"JanFebMar": ["Jan", "Feb", "Mar"],
"AprMayJunJul": ["Apr", "May", "Jun", "Jul"],
"AugSepOctNovDec": ["Aug", "Sep", "Oct", "Nov", "Dec"],
}
assert result == expected
class TestAverager:
    # FIXME: Update this test class so that it checks accurate averages
    # rather than arrays of 1's. This may involve subsetting.
@pytest.fixture(autouse=True)
def setup(self):
self.ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
self.ds.attrs.update({"operation_type": "climatology"})
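    # The tests below set the private attributes read by _averager() (time bounds, mode, freq, weighted flag, grouped time labels) directly, bypassing the public API.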
def test_weighted_by_month_day(self):
ds = self.ds.copy()
# Set object attrs required to test the method
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "day"
ds.temporal._weighted = True
ds.temporal._center_times = True
ds.temporal._time_grouped = xr.DataArray(
name="month_day",
data=np.array(
[
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 3, 16),
cftime.datetime(1, 4, 16),
                    cftime.datetime(1, 5, 16),
cftime.datetime(1, 6, 16),
cftime.datetime(1, 7, 16),
cftime.datetime(1, 8, 16),
cftime.datetime(1, 9, 16),
cftime.datetime(1, 10, 16),
cftime.datetime(1, 11, 16),
cftime.datetime(1, 12, 16),
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 12, 16),
],
),
coords={"time": ds.time},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
ts_result = ds.temporal._averager(ds["ts"])
ts_expected = np.ones((12, 4, 4))
assert np.allclose(ts_result, ts_expected)
def test_unweighted_daily_climatology(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "day"
ds.temporal._weighted = False
ds.temporal._center_times = True
ds.temporal._time_grouped = xr.DataArray(
name="month_day",
data=np.array(
[
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 3, 16),
cftime.datetime(1, 4, 16),
                    cftime.datetime(1, 5, 16),
cftime.datetime(1, 6, 16),
cftime.datetime(1, 7, 16),
cftime.datetime(1, 8, 16),
cftime.datetime(1, 9, 16),
cftime.datetime(1, 10, 16),
cftime.datetime(1, 11, 16),
cftime.datetime(1, 12, 16),
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 12, 16),
],
),
coords={"time": ds.time},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
ts_result = ds.temporal._averager(ds["ts"])
ts_expected = np.ones((12, 4, 4))
assert np.allclose(ts_result, ts_expected)
def test_weighted_annual_climatology(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "month"
ds.temporal._weighted = True
ds.temporal._center_times = True
ds.temporal._time_grouped = xr.DataArray(
name="month",
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 2, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 5, 1),
cftime.datetime(1, 6, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 8, 1),
cftime.datetime(1, 9, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 11, 1),
cftime.datetime(1, 12, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 2, 1),
cftime.datetime(1, 12, 1),
],
),
coords={"time": ds.time},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
# Check non-bounds variables were properly grouped and averaged
ts_result = ds.temporal._averager(ds["ts"])
ts_expected = np.ones((12, 4, 4))
assert np.allclose(ts_result, ts_expected)
def test_weighted_seasonal_climatology_with_DJF_and_drop_incomplete_DJF(self):
ds = self.ds.copy()
# Drop the incomplete seasons
ds = ds.isel(time=slice(2, -1))
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "season"
ds.temporal._weighted = True
ds.temporal._center_times = True
ds.temporal._season_config = {
"dec_mode": "DJF",
"drop_incomplete_djf": True,
}
ds.temporal._time_grouped = xr.DataArray(
name="season",
data=np.array(
[
# MAM
cftime.datetime(1, 3, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 3, 1),
# JJA
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
# SON
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
# DJF
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
],
),
coords={"time": ds.time},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
# Check non-bounds variables were properly grouped and averaged
ts_result = ds.temporal._averager(ds["ts"])
ts_expected = np.ones((4, 4, 4))
assert np.allclose(ts_result, ts_expected)
def test_weighted_seasonal_climatology_with_JFD(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "season"
ds.temporal._weighted = True
ds.temporal._center_times = True
ds.temporal._season_config = {"dec_mode": "JFD"}
ds.temporal._time_grouped = xr.DataArray(
name="season",
data=np.array(
[
# JFD
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
# MAM
cftime.datetime(1, 3, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 3, 1),
# JJA
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
# SON
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
# JFD
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
],
),
coords={"time": ds.time},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
ts_result = ds.temporal._averager(ds["ts"])
ts_expected = np.ones((4, 4, 4))
assert np.allclose(ts_result, ts_expected)
class TestDropIncompleteDJF:
@pytest.fixture(autouse=True)
def setup(self):
self.ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
def test_incomplete_DJF_seasons_are_dropped(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
# Define method inputs.
ds["ts"] = xr.DataArray(
data=np.ones(5),
coords={
"time": [
datetime(2000, 1, 1),
datetime(2000, 2, 1),
datetime(2000, 3, 1),
datetime(2000, 4, 1),
datetime(2001, 12, 1),
]
},
dims=["time"],
)
# Compare result of the method against the expected.
result = ds.temporal._drop_incomplete_djf(ds)
expected = ds.copy()
# Drop the incomplete DJF seasons
expected = expected.isel(time=slice(2, -1))
expected["ts"] = xr.DataArray(
data=np.ones(2),
coords={"time": [datetime(2000, 3, 1), datetime(2000, 4, 1)]},
dims=["time"],
)
assert result.identical(expected)
    def test_does_not_drop_incomplete_DJF_seasons_if_they_dont_exist(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
# Update time coordinate points so that the months don't fall in
# incomplete seasons.
ds.time.values[0] = datetime(1999, 3, 1)
ds.time.values[1] = datetime(1999, 4, 1)
ds.time.values[-1] = datetime(1999, 5, 1)
# Compare result of the method against the expected.
result = ds.temporal._drop_incomplete_djf(ds)
expected = ds
assert result.identical(expected)
class TestGroupTimeCoords:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_groups_time_coords_for_time_series_season_freq(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "time_series"
ds.temporal._freq = "season"
ds.temporal._season_config = {"dec_mode": "DJF", "drop_incomplete_djf": False}
# Compare result of the method against the expected.
result = ds.temporal._group_time_coords(ds.ts)
expected = xr.DataArray(
name="year_month",
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2002-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
assert result.identical(expected)
def test_groups_time_coords_for_climatology_season_freq(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "climatology"
ds.temporal._freq = "season"
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._season_config = {"dec_mode": "DJF", "drop_incomplete_djf": False}
# Compare result of the method against the expected.
result = ds.temporal._group_time_coords(ds.ts)
expected = xr.DataArray(
name="month",
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
],
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
assert result.identical(expected)
class TestProcessSeasonDataFrame:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
self.df = pd.DataFrame(
data=np.array(
[
(2000, "DJF", 1),
(2000, "DJF", 2),
(2000, "MAM", 3),
(2000, "MAM", 4),
(2000, "MAM", 5),
(2000, "JJA", 6),
(2000, "JJA", 7),
(2000, "JJA", 8),
(2000, "SON", 9),
(2000, "SON", 10),
(2000, "SON", 11),
(2000, "DJF", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
def test_maps_custom_seasons_if_custom_seasons_specified_and_drops_columns(self):
ds = self.ds.copy()
df = self.df.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "time_series"
ds.temporal._season_config = {
"custom_seasons": {
"JanFebMar": ["Jan", "Feb", "Mar"],
"AprMayJun": ["Apr", "May", "Jun"],
"JulAugSep": ["Jul", "Aug", "Sep"],
"OctNovDec": ["Oct", "Nov", "Dec"],
}
}
# Compare result of the method against the expected.
result = ds.temporal._process_season_dataframe(df)
expected = pd.DataFrame(
data=np.array(
[
(2000, 2),
(2000, 2),
(2000, 2),
(2000, 5),
(2000, 5),
(2000, 5),
(2000, 8),
(2000, 8),
(2000, 8),
(2000, 11),
(2000, 11),
(2000, 11),
],
dtype=object,
),
columns=["year", "month"],
)
expected["month"] = expected.month.astype("int64")
assert result.equals(expected)
def test_shifts_decembers_for_DJF_if_DJF_is_specified(self):
ds = self.ds.copy()
df = self.df.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "climatology"
ds.temporal._season_config = {
"dec_mode": "DJF",
"drop_incomplete_djf": True,
}
# Compare result of the method against the expected.
result = ds.temporal._process_season_dataframe(df)
expected = pd.DataFrame(
data=np.array(
[1, 1, 4, 4, 4, 7, 7, 7, 10, 10, 10, 1],
dtype="int64",
),
columns=["month"],
)
assert result.equals(expected)
class TestConvertDFtoDT:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_converts_dataframe_to_datetime_for_seasonal_time_series(self):
ds = self.ds.copy()
df = pd.DataFrame(
data=[(2000, 1), (2000, 4), (2000, 7)],
columns=["year", "month"],
)
ds.temporal._mode = "time_series"
result = ds.temporal._convert_df_to_dt(df)
expected = np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
)
assert np.array_equal(result, expected)
def test_converts_dataframe_to_datetime_for_seasonal_climatology(self):
ds = self.ds.copy()
df = pd.DataFrame(data=[1, 4, 7], columns=["month"])
ds.temporal._mode = "climatology"
result = ds.temporal._convert_df_to_dt(df)
expected = np.array(
[
cftime.datetime(1, 1, 1, 0),
cftime.datetime(1, 4, 1, 0),
cftime.datetime(1, 7, 1, 0),
]
)
assert np.array_equal(result, expected)
def test_converts_dataframe_to_datetime_for_seasonal_departures(self):
ds = self.ds.copy()
df = pd.DataFrame(data=[1, 4, 7], columns=["month"])
ds.temporal._mode = "departures"
result = ds.temporal._convert_df_to_dt(df)
expected = np.array(
[
cftime.datetime(1, 1, 1, 0),
cftime.datetime(1, 4, 1, 0),
cftime.datetime(1, 7, 1, 0),
]
)
assert np.array_equal(result, expected)
class TestMapMonthsToCustomSeasons:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_raises_error_if_custom_seasons_are_not_mapped(self):
ds = self.ds.copy()
ds.temporal._season_config = {"custom_seasons": None}
df = pd.DataFrame(
data=np.array(
[
(2000, "DJF", 1),
(2000, "DJF", 2),
(2000, "MAM", 3),
(2000, "MAM", 4),
(2000, "MAM", 5),
(2000, "JJA", 6),
(2000, "JJA", 7),
(2000, "JJA", 8),
(2000, "SON", 9),
(2000, "SON", 10),
(2000, "SON", 11),
(2000, "DJF", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
with pytest.raises(ValueError):
ds.temporal._map_months_to_custom_seasons(df)
def test_maps_three_month_custom_seasons(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._season_config = {
"custom_seasons": [
"JanFebMar",
"AprMayJun",
"JulAugSep",
"OctNovDec",
]
}
# Define method inputs.
# Includes default seasons.
df = pd.DataFrame(
data=np.array(
[
(2000, "DJF", 1),
(2000, "DJF", 2),
(2000, "MAM", 3),
(2000, "MAM", 4),
(2000, "MAM", 5),
(2000, "JJA", 6),
(2000, "JJA", 7),
(2000, "JJA", 8),
(2000, "SON", 9),
(2000, "SON", 10),
(2000, "SON", 11),
(2000, "DJF", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
# Compare result of the method against the expected.
result = ds.temporal._map_months_to_custom_seasons(df)
expected = pd.DataFrame(
data=np.array(
[
(2000, "JanFebMar", 1),
(2000, "JanFebMar", 2),
(2000, "JanFebMar", 3),
(2000, "AprMayJun", 4),
(2000, "AprMayJun", 5),
(2000, "AprMayJun", 6),
(2000, "JulAugSep", 7),
(2000, "JulAugSep", 8),
(2000, "JulAugSep", 9),
(2000, "OctNovDec", 10),
(2000, "OctNovDec", 11),
(2000, "OctNovDec", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
assert result.equals(expected)
def test_maps_six_month_custom_seasons(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._season_config = {
"custom_seasons": [
"JanFebMarAprMayJun",
"JulAugSepOctNovDec",
]
}
# Define method inputs.
# Includes default seasons.
df = pd.DataFrame(
data=np.array(
[
(2000, "DJF", 1),
(2000, "DJF", 2),
(2000, "MAM", 3),
(2000, "MAM", 4),
(2000, "MAM", 5),
(2000, "JJA", 6),
(2000, "JJA", 7),
(2000, "JJA", 8),
(2000, "SON", 9),
(2000, "SON", 10),
(2000, "SON", 11),
(2000, "DJF", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
# Compare result of the method against the expected.
result = ds.temporal._map_months_to_custom_seasons(df)
expected = pd.DataFrame(
data=np.array(
[
(2000, "JanFebMarAprMayJun", 1),
(2000, "JanFebMarAprMayJun", 2),
(2000, "JanFebMarAprMayJun", 3),
(2000, "JanFebMarAprMayJun", 4),
(2000, "JanFebMarAprMayJun", 5),
(2000, "JanFebMarAprMayJun", 6),
(2000, "JulAugSepOctNovDec", 7),
(2000, "JulAugSepOctNovDec", 8),
(2000, "JulAugSepOctNovDec", 9),
(2000, "JulAugSepOctNovDec", 10),
(2000, "JulAugSepOctNovDec", 11),
(2000, "JulAugSepOctNovDec", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
assert result.equals(expected)
def test_maps_three_month_custom_seasons_random_order(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._season_config = {
"custom_seasons": [
# Swapped Jan and Dec
"DecFebMar",
"AprMayJun",
"JulAugSep",
"OctNovJan",
]
}
# Define method inputs.
# Includes default seasons.
df = pd.DataFrame(
data=np.array(
[
(2000, "DJF", 1),
(2000, "DJF", 2),
(2000, "MAM", 3),
(2000, "MAM", 4),
(2000, "MAM", 5),
(2000, "JJA", 6),
(2000, "JJA", 7),
(2000, "JJA", 8),
(2000, "SON", 9),
(2000, "SON", 10),
(2000, "SON", 11),
(2000, "DJF", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
# Compare result of the method against the expected.
result = ds.temporal._map_months_to_custom_seasons(df)
expected = pd.DataFrame(
data=np.array(
[
(2000, "OctNovJan", 1),
(2000, "DecFebMar", 2),
(2000, "DecFebMar", 3),
(2000, "AprMayJun", 4),
(2000, "AprMayJun", 5),
(2000, "AprMayJun", 6),
(2000, "JulAugSep", 7),
(2000, "JulAugSep", 8),
(2000, "JulAugSep", 9),
(2000, "OctNovJan", 10),
(2000, "OctNovJan", 11),
(2000, "DecFebMar", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
assert result.equals(expected)
class TestMapSeasonstoMidMonths:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_maps_predefined_seasons_to_middle_months(self):
ds = self.ds.copy()
ds.temporal._season_config = {"custom_seasons": None}
df = pd.DataFrame({"season": ["DJF", "MAM", "JJA", "SON"]})
result = ds.temporal._map_seasons_to_mid_months(df)
expected = pd.DataFrame({"month": [1, 4, 7, 10]})
assert result.equals(expected)
def test_maps_custom_seasons_with_odd_months_to_middle_months(self):
ds = self.ds.copy()
ds.temporal._season_config = {
"custom_seasons": {
"FebMarApr": ["Feb", "Mar", "Apr"],
"MayJunJul": ["May", "Jun", "Jul"],
"AugSepOct": ["Aug", "Sep", "Oct"],
"NovDecJan": ["Nov", "Dec", "Jan"],
}
}
df = pd.DataFrame(
{"season": ["FebMarApr", "MayJunJul", "AugSepOct", "NovDecJan"]}
)
result = ds.temporal._map_seasons_to_mid_months(df)
expected = pd.DataFrame({"month": [3, 6, 9, 12]})
assert result.equals(expected)
def test_maps_custom_seasons_with_even_months_to_middle_months(self):
ds = self.ds.copy()
ds.temporal._season_config = {
"custom_seasons": {
"FebMarAprMay": ["Feb", "Mar", "Apr", "May"],
"JunJulAugSep": ["Jun", "Jul", "Aug", "Sep"],
"OctNovDecJan": ["Oct", "Nov", "Dec", "Jan"],
}
}
df = pd.DataFrame({"season": ["FebMarAprMay", "JunJulAugSep", "OctNovDecJan"]})
result = ds.temporal._map_seasons_to_mid_months(df)
expected = pd.DataFrame({"month": [4, 8, 12]})
assert result.equals(expected)
class TestShiftDecembers:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_decembers_shift_for_all_years(self):
ds = self.ds.copy()
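        # Decembers that belong to a DJF season are expected to be shifted to the following year, so each December is grouped with the January and February that come after it.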
# Define method inputs.
df = pd.DataFrame(
data=np.array(
[
(2000, "DJF", 1),
(2000, "DJF", 2),
(2000, "DJF", 12),
(2001, "DJF", 1),
(2001, "DJF", 2),
(2001, "DJF", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
# Compare result of the method against the expected.
result = ds.temporal._shift_decembers(df)
expected = pd.DataFrame(
data=np.array(
[
(2000, "DJF", 1),
(2000, "DJF", 2),
(2001, "DJF", 12),
(2001, "DJF", 1),
(2001, "DJF", 2),
(2002, "DJF", 12),
],
dtype=object,
),
columns=["year", "season", "month"],
)
assert result.equals(expected)
class TestDropObsoleteColumns:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_drops_month_col_for_time_series_operations(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "time_series"
# Define method inputs.
df = pd.DataFrame(columns=["year", "season", "month"])
# Compare result of the method against the expected.
result = ds.temporal._drop_obsolete_columns(df)
expected = pd.DataFrame(columns=["year", "season"])
assert result.equals(expected)
def test_drops_year_and_month_cols_for_climatology_and_departure_operations(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "climatology"
# Define method inputs.
df = pd.DataFrame(columns=["year", "season", "month"])
# Compare result of the method against the expected.
result = ds.temporal._drop_obsolete_columns(df)
expected = pd.DataFrame(columns=["season"])
assert result.equals(expected)
def test_raises_error_with_unsupported_operation(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "unsupported_operation"
df = pd.DataFrame(columns=["year", "season", "month"])
with pytest.raises(ValueError):
ds.temporal._drop_obsolete_columns(df)
class TestCalculateWeights:
class TestClimatology:
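        # Expected weights are each time step's length (from its time bounds) divided by the total length of its group, so the weights within every group sum to 1.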
@pytest.fixture(autouse=True)
def setup(self):
self.ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
def test_weights_for_seasonal_climatology_with_DJF(self):
ds = self.ds.copy()
# Replace time and time bounds with incomplete seasons removed
ds = ds.drop_dims("time")
ds.coords["time"] = xr.DataArray(
data=np.array(
[
"2000-03-16T12:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T12:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T12:00:00.000000000",
"2000-08-16T12:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T12:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T12:00:00.000000000",
"2001-01-16T12:00:00.000000000",
"2001-02-15T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
},
)
ds["ts"] = xr.DataArray(
name="ts",
data=np.ones((12, 4, 4)),
coords={"time": ds.time, "lat": ds.lat, "lon": ds.lon},
dims=["time", "lat", "lon"],
)
ds["time_bnds"] = xr.DataArray(
name="time_bnds",
data=np.array(
[
[
"2000-03-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
],
[
"2000-04-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
],
[
"2000-05-01T00:00:00.000000000",
"2000-06-01T00:00:00.000000000",
],
[
"2000-06-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
],
[
"2000-07-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
],
[
"2000-08-01T00:00:00.000000000",
"2000-09-01T00:00:00.000000000",
],
[
"2000-09-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
],
[
"2000-10-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
],
[
"2000-11-01T00:00:00.000000000",
"2000-12-01T00:00:00.000000000",
],
[
"2000-12-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
[
"2001-01-01T00:00:00.000000000",
"2001-02-01T00:00:00.000000000",
],
[
"2001-02-01T00:00:00.000000000",
"2001-03-01T00:00:00.000000000",
],
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time", "bnds"],
attrs={
"is_generated": "True",
},
)
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "season"
ds.temporal._weighted = "True"
ds.temporal.season_config = {"dec_mode": "DJF"}
ds.temporal._time_grouped = xr.DataArray(
name="season",
data=np.array(
[
cftime.datetime(1, 4, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
],
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(ds["ts"])
expected = np.array(
[
0.33695652,
0.32608696,
0.33695652,
0.32608696,
0.33695652,
0.33695652,
0.32967033,
0.34065934,
0.32967033,
0.34444444,
0.34444444,
0.31111111,
]
)
assert np.allclose(result, expected, equal_nan=True)
def test_weights_for_seasonal_climatology_with_JFD(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "season"
ds.temporal._weighted = "True"
            ds.temporal.season_config = {"dec_mode": "JFD"}
ds.temporal._time_grouped = xr.DataArray(
name="season",
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
],
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(ds["ts"])
expected = np.array(
[
[
0.17127072,
0.16022099,
0.33695652,
0.32608696,
0.33695652,
0.32608696,
0.33695652,
0.33695652,
0.32967033,
0.34065934,
0.32967033,
0.17127072,
0.17127072,
0.15469613,
0.17127072,
]
]
)
assert np.allclose(result, expected, equal_nan=True)
def test_weights_for_annual_climatology(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "month"
ds.temporal._weighted = "True"
ds.temporal.season_config = {"dec_mode": "DJF"}
ds.temporal._time_grouped = xr.DataArray(
name="month",
data=np.array(
[
cftime.datetime(1, 1, 1),
cftime.datetime(1, 2, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 4, 1),
cftime.datetime(1, 5, 1),
cftime.datetime(1, 6, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 8, 1),
cftime.datetime(1, 9, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 11, 1),
cftime.datetime(1, 12, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 2, 1),
cftime.datetime(1, 12, 1),
],
),
coords={"time": ds.time},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(self.ds["ts"])
expected = np.array(
[
[
0.5,
0.50877193,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
0.5,
0.5,
0.49122807,
0.5,
]
]
)
assert np.allclose(result, expected)
def test_weights_for_daily_climatology(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "climatology"
ds.temporal._freq = "day"
ds.temporal._weighted = "True"
ds.temporal._season_config = {
"dec_mode": "DJF",
"drop_incomplete_djf": True,
}
ds.temporal._time_grouped = xr.DataArray(
name="month_day",
data=np.array(
[
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 3, 16),
cftime.datetime(1, 4, 16),
                        cftime.datetime(1, 5, 16),
cftime.datetime(1, 6, 16),
cftime.datetime(1, 7, 16),
cftime.datetime(1, 8, 16),
cftime.datetime(1, 9, 16),
cftime.datetime(1, 10, 16),
cftime.datetime(1, 11, 16),
cftime.datetime(1, 12, 16),
cftime.datetime(1, 1, 16),
cftime.datetime(1, 2, 15),
cftime.datetime(1, 12, 16),
],
),
coords={"time": ds.time},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(self.ds["ts"])
expected = np.array(
[
0.5,
0.50877193,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
0.5,
0.5,
0.49122807,
0.5,
]
)
assert np.allclose(result, expected)
class TestTimeSeries:
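        # Same weighting idea as TestClimatology above, but groups are labeled with actual year/season (or year/month, year/day, ...) datetimes rather than climatological cftime labels.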
@pytest.fixture(autouse=True)
def setup(self):
self.ds: xr.Dataset = generate_dataset(cf_compliant=True, has_bounds=True)
def test_weights_for_yearly_averages(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "time_series"
ds.temporal._freq = "year"
ds.temporal._weighted = "True"
ds.temporal._time_grouped = xr.DataArray(
name="year",
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(self.ds["ts"])
expected = np.array(
[
0.08469945,
0.07923497,
0.08469945,
0.08196721,
0.08469945,
0.08196721,
0.08469945,
0.08469945,
0.08196721,
0.08469945,
0.08196721,
0.08469945,
0.34444444,
0.31111111,
0.34444444,
]
)
assert np.allclose(result, expected)
def test_weights_for_monthly_averages(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "time_series"
ds.temporal._freq = "month"
ds.temporal._weighted = "True"
ds.temporal._time_grouped = xr.DataArray(
name="year_month",
data=np.array(
                    [
                        "2000-01-01T00:00:00.000000000",
                        "2000-02-01T00:00:00.000000000",
                        "2000-03-01T00:00:00.000000000",
                        "2000-04-01T00:00:00.000000000",
                        "2000-05-01T00:00:00.000000000",
                        "2000-06-01T00:00:00.000000000",
                        "2000-07-01T00:00:00.000000000",
                        "2000-08-01T00:00:00.000000000",
                        "2000-09-01T00:00:00.000000000",
                        "2000-10-01T00:00:00.000000000",
                        "2000-11-01T00:00:00.000000000",
                        "2000-12-01T00:00:00.000000000",
                        "2001-01-01T00:00:00.000000000",
                        "2001-02-01T00:00:00.000000000",
                        "2001-12-01T00:00:00.000000000",
                    ],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(self.ds["ts"])
expected = np.ones(15)
assert np.allclose(result, expected)
def test_weights_for_seasonal_averages_with_DJF_and_drop_incomplete_seasons(
self,
):
ds = self.ds.copy()
# Replace time and time bounds with incomplete seasons removed
ds = ds.drop_dims("time")
ds.coords["time"] = xr.DataArray(
data=np.array(
[
"2000-03-16T12:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T12:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T12:00:00.000000000",
"2000-08-16T12:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T12:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T12:00:00.000000000",
"2001-01-16T12:00:00.000000000",
"2001-02-15T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
},
)
ds["ts"] = xr.DataArray(
name="ts",
data=np.ones((12, 4, 4)),
coords={"time": ds.time, "lat": ds.lat, "lon": ds.lon},
dims=["time", "lat", "lon"],
)
ds["time_bnds"] = xr.DataArray(
name="time_bnds",
data=np.array(
[
[
"2000-03-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
],
[
"2000-04-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
],
[
"2000-05-01T00:00:00.000000000",
"2000-06-01T00:00:00.000000000",
],
[
"2000-06-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
],
[
"2000-07-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
],
[
"2000-08-01T00:00:00.000000000",
"2000-09-01T00:00:00.000000000",
],
[
"2000-09-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
],
[
"2000-10-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
],
[
"2000-11-01T00:00:00.000000000",
"2000-12-01T00:00:00.000000000",
],
[
"2000-12-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
[
"2001-01-01T00:00:00.000000000",
"2001-02-01T00:00:00.000000000",
],
[
"2001-02-01T00:00:00.000000000",
"2001-03-01T00:00:00.000000000",
],
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time", "bnds"],
attrs={
"is_generated": "True",
},
)
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "time_series"
ds.temporal._freq = "season"
ds.temporal._weighted = "True"
ds.temporal.season_config = {"dec_mode": "DJF"}
ds.temporal._time_grouped = xr.DataArray(
name="year_season",
data=np.array(
[
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(ds["ts"])
expected = np.array(
[
0.33695652,
0.32608696,
0.33695652,
0.32608696,
0.33695652,
0.33695652,
0.32967033,
0.34065934,
0.32967033,
0.34444444,
0.34444444,
0.31111111,
]
)
assert np.allclose(result, expected, equal_nan=True)
def test_weights_for_seasonal_averages_with_JFD(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "time_series"
ds.temporal._freq = "season"
ds.temporal._weighted = "True"
            ds.temporal.season_config = {"dec_mode": "JFD"}
ds.temporal._time_grouped = xr.DataArray(
name="year_season",
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
# This month is included in the JFD season
"2000-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2002-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(self.ds["ts"])
expected = np.array(
[
0.34065934,
0.31868132,
0.33695652,
0.32608696,
0.33695652,
0.32608696,
0.33695652,
0.33695652,
0.32967033,
0.34065934,
0.32967033,
0.34065934,
0.52542373,
0.47457627,
1.0,
]
)
assert np.allclose(result, expected)
def test_custom_season_time_series_weights(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "time_series"
ds.temporal._freq = "season"
ds.temporal._weighted = "True"
ds.temporal._season_config = {
"custom_seasons": {
"JanFebMar": ["Jan", "Feb", "Mar"],
"AprMayJun": ["Apr", "May", "Jun"],
"JulAugSep": ["Jul", "Aug", "Sep"],
"OctNovDec": ["Oct", "Nov", "Dec"],
}
}
ds.temporal._time_grouped = xr.DataArray(
name="year_season",
data=np.array(
[
"2000-02-01T00:00:00.000000000",
"2000-02-01T00:00:00.000000000",
"2000-02-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
"2000-05-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
"2000-08-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
"2000-11-01T00:00:00.000000000",
"2001-02-01T00:00:00.000000000",
"2001-02-01T00:00:00.000000000",
"2002-02-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(self.ds["ts"])
expected = np.array(
[
0.34065934,
0.31868132,
0.34065934,
0.32967033,
0.34065934,
0.32967033,
0.33695652,
0.33695652,
0.32608696,
0.33695652,
0.32608696,
0.33695652,
0.52542373,
0.47457627,
1.0,
]
)
assert np.allclose(result, expected)
def test_weights_for_daily_averages(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "time_series"
            ds.temporal._freq = "day"
ds.temporal._weighted = "True"
ds.temporal._time_grouped = xr.DataArray(
name="year_month_day",
data=np.array(
[
"2000-01-16T00:00:00.000000000",
"2000-02-15T00:00:00.000000000",
"2000-03-16T00:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T00:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T00:00:00.000000000",
"2000-08-16T00:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T00:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T00:00:00.000000000",
"2001-01-16T00:00:00.000000000",
"2001-02-15T00:00:00.000000000",
"2001-12-16T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(self.ds["ts"])
expected = np.ones(15)
assert np.allclose(result, expected)
def test_weights_for_hourly_averages(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._time_bounds = ds.time_bnds.copy()
ds.temporal._mode = "time_series"
ds.temporal._freq = "hour"
ds.temporal._weighted = "True"
            ds.temporal.season_config = {"dec_mode": "JFD"}
ds.temporal._time_grouped = xr.DataArray(
name="year_month_day_hour",
data=np.array(
[
"2000-01-16T12:00:00.000000000",
"2000-02-15T12:00:00.000000000",
"2000-03-16T12:00:00.000000000",
"2000-04-16T00:00:00.000000000",
"2000-05-16T12:00:00.000000000",
"2000-06-16T00:00:00.000000000",
"2000-07-16T12:00:00.000000000",
"2000-08-16T12:00:00.000000000",
"2000-09-16T00:00:00.000000000",
"2000-10-16T12:00:00.000000000",
"2000-11-16T00:00:00.000000000",
"2000-12-16T12:00:00.000000000",
"2001-01-16T12:00:00.000000000",
"2001-02-15T00:00:00.000000000",
"2001-12-16T12:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._get_weights(self.ds["ts"])
expected = np.ones(15)
assert np.allclose(result, expected)
class TestGroupByFreq:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_groups_data_var_for_seasonal_averaging_with_JFD(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
time_grouped = xr.DataArray(
name="year_season",
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2002-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
ds.temporal._time_grouped = time_grouped
ts = ds.ts.copy()
expected = ts.copy()
expected.coords["year_season"] = time_grouped
expected = expected.groupby("year_season")
result = ds.temporal._groupby_freq(ts)
assert result.groups == expected.groups
def test_groups_data_var_for_seasonal_climatology_with_DJF(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
time_grouped = xr.DataArray(
name="season",
data=np.array(
[
# JFD
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
# MAM
cftime.datetime(1, 3, 1),
cftime.datetime(1, 3, 1),
cftime.datetime(1, 3, 1),
# JJA
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
cftime.datetime(1, 7, 1),
# SON
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
cftime.datetime(1, 10, 1),
# JFD
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
cftime.datetime(1, 1, 1),
],
),
coords={"time": ds.time},
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
ds.temporal._time_grouped = time_grouped
ts = ds.ts.copy()
expected = ts.copy()
expected.coords["season"] = time_grouped
expected = expected.groupby("season")
result = ds.temporal._groupby_freq(ts)
assert result.groups == expected.groups
class TestAddOperationAttributes:
@pytest.fixture(autouse=True)
def setup(self):
self.ds = generate_dataset(cf_compliant=True, has_bounds=True)
def test_adds_attrs_to_data_var_with_DJF(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "climatology"
ds.temporal._freq = "season"
ds.temporal._weighted = True
ds.temporal._center_times = True
ds.temporal._season_config = {
"dec_mode": "DJF",
"drop_incomplete_djf": "True",
}
ds.temporal._time_grouped = xr.DataArray(
name="year_season",
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2002-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._add_operation_attrs(ds.ts)
expected = ds.ts.copy()
expected.attrs.update(
{
"operation": "temporal_avg",
"mode": ds.temporal._mode,
"freq": ds.temporal._freq,
"weighted": "True",
"center_times": "True",
"dec_mode": "DJF",
"drop_incomplete_djf": "True",
}
)
assert result.identical(expected)
def test_adds_attrs_to_data_var_with_custom_seasons(self):
ds = self.ds.copy()
# Set object attrs required to test the method.
ds.temporal._mode = "climatology"
ds.temporal._freq = "season"
ds.temporal._weighted = True
ds.temporal._center_times = True
ds.temporal._season_config = {
"custom_seasons": {
"JanFebMar": ["Jan", "Feb", "Mar"],
"AprMayJun": ["Apr", "May", "Jun"],
"JulAugSep": ["Jul", "Aug", "Sep"],
"OctNovDec": ["Oct", "Nov", "Dec"],
}
}
ds.temporal._time_grouped = xr.DataArray(
name="year_season",
data=np.array(
[
"2000-01-01T00:00:00.000000000",
"2000-01-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-04-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-07-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2000-10-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2001-01-01T00:00:00.000000000",
"2002-01-01T00:00:00.000000000",
],
dtype="datetime64[ns]",
),
coords={"time": ds.time},
dims=["time"],
attrs={
"axis": "T",
"long_name": "time",
"standard_name": "time",
"bounds": "time_bnds",
},
)
# Compare result of the method against the expected.
result = ds.temporal._add_operation_attrs(ds.ts)
expected = ds.ts.copy()
expected.attrs.update(
{
"operation": "temporal_avg",
"mode": ds.temporal._mode,
"freq": ds.temporal._freq,
"weighted": "True",
"center_times": "True",
"custom_seasons": ["JanFebMar", "AprMayJun", "JulAugSep", "OctNovDec"],
}
)
assert result.identical(expected)
| 36.748624 | 87 | 0.40835 |
79440ce7082c50c78c2002448ef125c872d6cce1 | 21,770 | py | Python | parser/fase2/team03/parse/expressions/expressions_base.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | null | null | null | parser/fase2/team03/parse/expressions/expressions_base.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | null | null | null | parser/fase2/team03/parse/expressions/expressions_base.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 4 | 2020-12-19T17:12:13.000Z | 2021-01-07T20:29:53.000Z | from .expression_enum import OpArithmetic, OpRelational, OpLogic, OpPredicate
from datetime import date, datetime
from parse.errors import Error, ErrorType
from parse.ast_node import ASTNode
import hashlib
from TAC.quadruple import Quadruple
from TAC.tac_enum import *
from parse.symbol_table import generate_tmp
class Numeric(ASTNode):
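    # Literal numeric value: execute() returns the value and generate() returns it as a string.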
def __init__(self, val, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.val = val
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return self.val
def generate(self, table, tree):
super().generate(table, tree)
return str(self.val)
class NumericPositive(ASTNode):
def __init__(self, val, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.val = val
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
self.val = self.val.execute(table, tree)
if type(self.val) == int or type(self.val) == float:
return self.val * 1
else:
raise Error(self.line, self.column, ErrorType.SEMANTIC, 'TypeError: must be number')
def generate(self, table, tree):
super().generate(table, tree)
return f'+{self.val.generate(table, tree)}'
class NumericNegative(ASTNode):
def __init__(self, val, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.val = val
self.graph_ref = graph_ref
def execute(self, table, tree):
self.val = self.val.execute(table, tree)
if type(self.val) == int or type(self.val) == float:
return self.val * -1
else:
raise Error(self.line, self.column, ErrorType.SEMANTIC, 'TypeError: must be number')
def generate(self, table, tree):
super().generate(table, tree)
return f'-{self.val.generate(table, tree)}'
class Text(ASTNode):
def __init__(self, val, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.val = val
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return self.val
def generate(self, table, tree):
super().generate(table, tree)
return f"'{self.val}'"
class BoolAST(ASTNode):
def __init__(self, val, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.val = (str(val).upper() == "TRUE")
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return self.val
def generate(self, table, tree):
super().generate(table, tree)
return 'TRUE' if self.val else 'FALSE'
class DateAST(ASTNode):
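    # EXTRACT(<part> FROM <timestamp literal>): parses the literal and stores the requested part (YEAR, MONTH, DAY, HOUR, MINUTE or SECOND) in self.result.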
def __init__(self, val, option, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.val = val
self.option = option
self.graph_ref = graph_ref
try:
self.val = datetime.strptime(val, '%Y-%m-%d %H:%M:%S')
if self.option == 'YEAR':
self.result = self.val.year
elif self.option == 'HOUR':
self.result = self.val.hour
elif self.option == 'MINUTE':
self.result = self.val.minute
elif self.option == 'SECOND':
self.result = self.val.second
elif self.option == 'MONTH':
self.result = self.val.month
elif self.option == 'DAY':
self.result = self.val.day
except:
self.result = None
raise Error(self.line, self.column, ErrorType.SEMANTIC, 'it is not a date time format')
def execute(self, table, tree):
super().execute(table, tree)
return self.option + ' ' + str(self.result)
def generate(self, table, tree):
super().generate(table, tree)
        return f'EXTRACT ({self.option} {self.val})'
class DateAST_2(ASTNode):
def __init__(self, option, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.option = option
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return str(self.option)
def generate(self, table, tree):
super().generate(table, tree)
return f'{str(self.option)} FROM TIMESTAMP'
class ColumnName(ASTNode):
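    # Column reference, optionally qualified with a table name. When tree is a list of column headers, execute() returns this column's value from the given row; otherwise it returns the (qualified) column name.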
def __init__(self, tName, cName, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.tName = tName
self.cName = cName
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
fullname = ''
if self.tName is None or self.tName == "":
fullname = self.cName
else:
            fullname = self.tName + "." + self.cName  # TODO: check whether it is necessary to go to the symbol table to get the value or to verify that the object exists
        # yes, we do have to get the value of the column name:
        # if tree is a list, execute has to look up this column's value;
        # in that case table holds the row being evaluated (e.g. by a WHERE expression) and tree holds the column headers
if isinstance(tree, list):
try:
index = tree.index(fullname)
return table[index]
except:
raise Error(self.line, self.column, ErrorType.RUNTIME,
f'[AST] the name {fullname} is not belong of the selected table(s)')
else:
return fullname
def generate(self, table, tree):
super().generate(table, tree)
fullname = self.cName
if self.tName is not None and self.tName != "":
fullname = f'{self.tName}.{fullname}'
return fullname
class Now(ASTNode):
def __init__(self, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return datetime.now()
def generate(self, table, tree):
super().generate(table, tree)
return 'NOW()'
class NowDate(ASTNode):
def __init__(self, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return str(date.today())
def generate(self, table, tree):
super().generate(table, tree)
return 'CURRENT_DATE'
class NowTime(ASTNode):
def __init__(self, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
now = datetime.now()
return now.strftime("%H:%M:%S")
def generate(self, table, tree):
super().generate(table, tree)
return 'CURRENT_TIME'
class BinaryExpression(ASTNode):
# Class that handles every arithmetic expression
def __init__(self, exp1, exp2, operator, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
# TODO: Validate type
if self.operator is None: # 'Number' or 'artirmetic function' production for example
return self.exp1.execute(table, tree)
if self.operator == OpArithmetic.PLUS:
return self.exp1.execute(table, tree) + self.exp2.execute(table, tree)
if self.operator == OpArithmetic.MINUS:
return self.exp1.execute(table, tree) - self.exp2.execute(table, tree)
if self.operator == OpArithmetic.TIMES:
return self.exp1.execute(table, tree) * self.exp2.execute(table, tree)
if self.operator == OpArithmetic.DIVIDE:
return self.exp1.execute(table, tree) / self.exp2.execute(table, tree)
if self.operator == OpArithmetic.MODULE:
return self.exp1.execute(table, tree) % self.exp2.execute(table, tree)
if self.operator == OpArithmetic.POWER:
            return pow(self.exp1.execute(table, tree), self.exp2.execute(table, tree))
def generate(self, table, tree):
super().generate(table, tree)
        if tree is None:  # SQL text mode (consistent with the other expression classes); otherwise emit TAC
            if self.operator is None:  # 'Number' or 'arithmetic function' production, for example
                return self.exp1.generate(table, tree)
            if self.operator == OpArithmetic.PLUS:
                return f'{self.exp1.generate(table, tree)} + {self.exp2.generate(table, tree)}'
            if self.operator == OpArithmetic.MINUS:
                return f'{self.exp1.generate(table, tree)} - {self.exp2.generate(table, tree)}'
            if self.operator == OpArithmetic.TIMES:
                return f'{self.exp1.generate(table, tree)} * {self.exp2.generate(table, tree)}'
            if self.operator == OpArithmetic.DIVIDE:
                return f'{self.exp1.generate(table, tree)} / {self.exp2.generate(table, tree)}'
            if self.operator == OpArithmetic.MODULE:
                return f'{self.exp1.generate(table, tree)} % {self.exp2.generate(table, tree)}'
            if self.operator == OpArithmetic.POWER:
                return f'{self.exp1.generate(table, tree)} ^ {self.exp2.generate(table, tree)}'
        else:  # TAC
            # Classes that return scalar values, NOT expressions: Numeric, Text, BoolAST, ColumnName for IDs, expressions_math.py, expressions_trig.py
arg1 = None
arg2 = None
gen_exp1 = self.exp1.generate(table, tree)
if isinstance(gen_exp1,Quadruple):
arg1 = gen_exp1.res
else:
                arg1 = gen_exp1  # if it isn't a Quadruple it must be a scalar value such as 1, 45, 'OLC2 100 pts', False
            # same for arg2, but written with the ternary operator syntax ;)
gen_exp2 = self.exp2.generate(table, tree)
arg2 = gen_exp2.res if isinstance(gen_exp2,Quadruple) else gen_exp2
this_tac = Quadruple(self.operator, arg1, arg2, generate_tmp(), OpTAC.ASSIGNMENT)
tree.append(this_tac)
return this_tac
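# Illustration (added for clarity; the temporary names are assumptions about what
# generate_tmp() returns): in TAC mode (tree is a list), an expression such as
# a + b * 2 would roughly append two quadruples to the list,
#   t1 = b * 2   -> Quadruple(OpArithmetic.TIMES, 'b', 2, 't1', OpTAC.ASSIGNMENT)
#   t2 = a + t1  -> Quadruple(OpArithmetic.PLUS, 'a', 't1', 't2', OpTAC.ASSIGNMENT)
# and return the last Quadruple so that parent nodes can chain on its .res field.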
class RelationalExpression(ASTNode):
# Class that handles every relational expression
def __init__(self, exp1, exp2, operator, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
if self.operator == OpRelational.GREATER:
return self.exp1.execute(table, tree) > self.exp2.execute(table, tree)
if self.operator == OpRelational.LESS:
return self.exp1.execute(table, tree) < self.exp2.execute(table, tree)
if self.operator == OpRelational.EQUALS:
return self.exp1.execute(table, tree) == self.exp2.execute(table, tree)
if self.operator == OpRelational.NOT_EQUALS:
return self.exp1.execute(table, tree) != self.exp2.execute(table, tree)
if self.operator == OpRelational.GREATER_EQUALS:
return self.exp1.execute(table, tree) >= self.exp2.execute(table, tree)
if self.operator == OpRelational.LESS_EQUALS:
return self.exp1.execute(table, tree) <= self.exp2.execute(table, tree)
if self.operator == OpRelational.LIKE: # TODO add execution to [NOT] LIKE, Regex maybe?
return self.exp1.execute(table, tree) == self.exp2.execute(table, tree)
if self.operator == OpRelational.NOT_LIKE:
return self.exp1.execute(table, tree) != self.exp2.execute(table, tree)
def generate(self, table, tree):
super().generate(table, tree)
if tree is None:
            if self.operator == OpRelational.GREATER:
                return f'{self.exp1.generate(table, tree)} > {self.exp2.generate(table, tree)}'
            if self.operator == OpRelational.LESS:
                return f'{self.exp1.generate(table, tree)} < {self.exp2.generate(table, tree)}'
            if self.operator == OpRelational.EQUALS:
                return f'{self.exp1.generate(table, tree)} = {self.exp2.generate(table, tree)}'
            if self.operator == OpRelational.NOT_EQUALS:
                return f'{self.exp1.generate(table, tree)} != {self.exp2.generate(table, tree)}'
            if self.operator == OpRelational.GREATER_EQUALS:
                return f'{self.exp1.generate(table, tree)} >= {self.exp2.generate(table, tree)}'
            if self.operator == OpRelational.LESS_EQUALS:
                return f'{self.exp1.generate(table, tree)} <= {self.exp2.generate(table, tree)}'
            if self.operator == OpRelational.LIKE:
                return f'{self.exp1.generate(table, tree)} LIKE {self.exp2.generate(table, tree)}'
            if self.operator == OpRelational.NOT_LIKE:
                return f'{self.exp1.generate(table, tree)} NOT LIKE {self.exp2.generate(table, tree)}'
else:
arg1 = None
arg2 = None
gen_exp1 = self.exp1.generate(table, tree)
arg1 = gen_exp1.res if isinstance(gen_exp1,Quadruple) else gen_exp1
gen_exp2 = self.exp2.generate(table, tree)
arg2 = gen_exp2.res if isinstance(gen_exp2,Quadruple) else gen_exp2
this_tac = Quadruple(self.operator, arg1, arg2, generate_tmp(), OpTAC.ASSIGNMENT)
tree.append(this_tac)
return this_tac
class PredicateExpression(ASTNode): # TODO check operations and call to exceute function
# Class that handles every logic expression
def __init__(self, exp1, exp2, operator, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
if self.operator == OpPredicate.NULL:
return self.exp1.execute(table, tree) is None
if self.operator == OpPredicate.NOT_NULL:
return self.exp1.execute(table, tree) is not None
if self.operator == OpPredicate.DISTINCT: # Improve logic in order to allow null and 0 to be the same
return self.exp1.execute(table, tree) != self.exp2.execute(table, tree)
if self.operator == OpPredicate.NOT_DISTINCT: # Improve logic in order to allow null and 0 to be the same
return self.exp1.execute(table, tree) == self.exp2.execute(table, tree)
if self.operator == OpPredicate.TRUE:
return self.exp1.execute(table, tree) is True
if self.operator == OpPredicate.NOT_TRUE:
return self.exp1.execute(table, tree) is False
if self.operator == OpPredicate.FALSE:
return self.exp1.execute(table, tree) is False
if self.operator == OpPredicate.NOT_FALSE:
return self.exp1.execute(table, tree) is True
if self.operator == OpPredicate.UNKNOWN: # TODO do actual comparison to Unknown... No ideas right now
return False
if self.operator == OpPredicate.NOT_UNKNOWN: # Same as previous comment about Unknown
return False
def generate(self, table, tree):
super().generate(table, tree)
if tree is None:
if self.operator == OpPredicate.NULL:
return f'{self.exp1.generate(table, tree)} IS NULL'
if self.operator == OpPredicate.NOT_NULL:
return f'{self.exp1.generate(table, tree)} IS NOT NULL'
            if self.operator == OpPredicate.DISTINCT:
                return f'{self.exp1.generate(table, tree)} IS DISTINCT FROM {self.exp2.generate(table, tree)}'
            if self.operator == OpPredicate.NOT_DISTINCT:
                return f'{self.exp1.generate(table, tree)} IS NOT DISTINCT FROM {self.exp2.generate(table, tree)}'
if self.operator == OpPredicate.TRUE:
return f'{self.exp1.generate(table, tree)} IS TRUE'
if self.operator == OpPredicate.NOT_TRUE:
return f'{self.exp1.generate(table, tree)} IS NOT TRUE'
if self.operator == OpPredicate.FALSE:
return f'{self.exp1.generate(table, tree)} IS FALSE'
if self.operator == OpPredicate.NOT_FALSE:
return f'{self.exp1.generate(table, tree)} IS NOT FALSE'
if self.operator == OpPredicate.UNKNOWN:
return f'{self.exp1.generate(table, tree)} IS UNKNOWN'
if self.operator == OpPredicate.NOT_UNKNOWN:
return f'{self.exp1.generate(table, tree)} IS NOT UNKNOWN'
else:
pass
class BoolExpression(ASTNode):
def __init__(self, exp1, exp2, operator, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
exec1 = self.exp1.execute(table, tree)
exec2 = self.exp2.execute(table, tree)
if isinstance(exec1, bool) and isinstance(exec2, bool):
if self.operator == OpLogic.AND:
return exec1 and exec2
if self.operator == OpLogic.OR:
return exec1 or exec2
else:
raise Exception("The result of operation isn't boolean value")
def generate(self, table, tree):
super().generate(table, tree)
exec1 = self.exp1.generate(table, tree)
exec2 = self.exp2.generate(table, tree)
if tree is None:
if self.operator == OpLogic.AND:
return f'{exec1} AND {exec2}'
if self.operator == OpLogic.OR:
return f'{exec1} OR {exec2}'
else:
arg1 = None
arg2 = None
arg1 = exec1.res if isinstance(exec1,Quadruple) else exec1
arg2 = exec2.res if isinstance(exec2,Quadruple) else exec2
this_tac = Quadruple(self.operator, arg1, arg2, generate_tmp(), OpTAC.ASSIGNMENT)
tree.append(this_tac)
return this_tac
class Negation(ASTNode):
def __init__(self, exp1, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp1 = exp1
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
exec1 = self.exp1.execute(table, tree)
if isinstance(exec1, bool):
return not exec1
else:
raise Exception("The result of operation isn't boolean value")
def generate(self, table, tree):
super().generate(table, tree)
exec1 = self.exp1.generate(table, tree)
if tree is None:
if exec1 != 'TRUE' and exec1 != 'FALSE':
raise Exception("The result of operation isn't boolean value")
return 'TRUE' if exec1 == 'FALSE' else 'FALSE'
else:
arg1 = exec1.res if isinstance(exec1,Quadruple) else exec1
this_tac = Quadruple(OpLogic.NOT, arg1, None, generate_tmp(), OpTAC.ASSIGNMENT)
tree.append(this_tac)
return this_tac
class Identifier(ASTNode):
def __init__(self, val, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.val = val
self.graph_ref = graph_ref
# Must return lexeme of ID
def execute(self, table, tree):
super().execute(table, tree)
return self.val
def executeSTVal(self, table, tree): # TODO: Symbol value from ST :S
super().execute(table, tree)
return self.val
def generate(self, table, tree):
super().generate(table, tree)
return self.val
class TypeDef(ASTNode):
def __init__(self, val, min_size, max_size, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.val = val # token name: CHAR, INTEGER,...
        self.min_size = min_size
        self.max_size = max_size
self.graph_ref = graph_ref
# Must return lexeme of ID
def execute(self, table, tree):
super().execute(table, tree)
return self.val
def minSize(self, table, tree):
super().execute(table, tree)
return self.min_size
def maxSize(self, table, tree):
super().execute(table, tree)
return self.max_size
def generate(self, table, tree):
super().generate(table, tree)
return self.val
class Nullable(ASTNode):
def __init__(self, val, line, column, graph_ref):
ASTNode.__init__(self, line, column)
        self.val = val  # True: accept null values, otherwise False
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
return self.val
def generate(self, table, tree):
super().generate(table, tree)
return self.val
class MD5_(ASTNode):
def __init__(self, exp, line, column, graph_ref):
ASTNode.__init__(self, line, column)
self.exp = exp
self.graph_ref = graph_ref
def execute(self, table, tree):
super().execute(table, tree)
exp = self.exp.execute(table, tree)
try:
return hashlib.md5(exp.encode('utf-8')).hexdigest()
except:
raise (Error(self.line, self.column, ErrorType.SEMANTIC, 'MD5 error'))
def generate(self, table, tree):
super().generate(table, tree)
return f'MD5({self.exp.generate(table, tree)})'
| 39.653916 | 156 | 0.609095 |
79440d65095cdd6b74878b17a2d58b4b076407a5 | 6,810 | py | Python | nevergrad/instrumentation/core.py | enthought/nevergrad | 2a0f778aa316264e785d0e6305a897679b3b97f7 | [
"MIT"
] | null | null | null | nevergrad/instrumentation/core.py | enthought/nevergrad | 2a0f778aa316264e785d0e6305a897679b3b97f7 | [
"MIT"
] | null | null | null | nevergrad/instrumentation/core.py | enthought/nevergrad | 2a0f778aa316264e785d0e6305a897679b3b97f7 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import uuid
import copy
from typing import Any, Tuple, Optional, Dict, Set, TypeVar
import typing as tp
import numpy as np
from nevergrad.common.typetools import ArrayLike
from ..parametrization.utils import Descriptors
from ..parametrization import parameter as p
# pylint: disable=no-value-for-parameter,too-many-ancestors, too-many-instance-attributes
ArgsKwargs = Tuple[Tuple[Any, ...], Dict[str, Any]]
T = TypeVar('T', bound="Variable")
class VarSpecs:
# pylint: disable=too-many-arguments, unused-argument
def __init__(self) -> None:
self.dimension = -1
self.nargs = 1
self.kwargs_keys: Set[str] = set()
self.continuous = True
self.noisy = False
self.name: Optional[str] = None
def update(self,
dimension: Optional[int] = None,
nargs: Optional[int] = None,
kwargs_keys: Optional[Set[str]] = None,
continuous: Optional[bool] = None,
noisy: Optional[bool] = None,
name: Optional[str] = None
) -> None:
for key, value in locals().items():
if key != "self" and value is not None:
setattr(self, key, value)
def _default_checker(*args: Any, **kwargs: Any) -> bool: # pylint: disable=unused-argument
return True
class Variable(p.Instrumentation):
def __init__(self) -> None:
super().__init__()
self._specs = VarSpecs()
# compatibility
self._data: tp.Optional[np.ndarray] = None
self._value: tp.Optional[ArgsKwargs] = None
@property
def data(self) -> np.ndarray:
if self._data is None:
self._data = np.zeros((self.dimension,))
return self._data
def _set_random_state(self, random_state: np.random.RandomState) -> None:
self._random_state = random_state
def _get_name(self) -> str:
if self._specs.name is not None:
return self._specs.name
return repr(self)
def with_name(self: T, name: str) -> T:
"""Sets a name and return the current instrumentation (for chaining)
"""
return self.set_name(name)
def copy(self: T) -> T: # TODO, use deepcopy directly in the code if it works?
"""Return a new instrumentation with the same variable and same name
(but a different random state)
"""
instru = copy.deepcopy(self)
instru._random_state = None
return instru
@property
def dimension(self) -> int:
return self._specs.dimension
@property
def nargs(self) -> int:
return self._specs.nargs
@property
def kwargs_keys(self) -> Set[str]:
return self._specs.kwargs_keys
@property
def continuous(self) -> bool:
return self._specs.continuous
@property
def noisy(self) -> bool:
return self._specs.noisy
def arguments_to_data(self, *args: Any, **kwargs: Any) -> np.ndarray:
"""Converts args and kwargs into data in np.ndarray format
"""
if len(args) != self.nargs:
raise TypeError(f"Expected {self.nargs} arguments ({len(args)} given: {args})")
if self.kwargs_keys != set(kwargs.keys()):
raise TypeError(f"Expected arguments {self.kwargs_keys} ({set(kwargs.keys())} given: {kwargs})")
return self._arguments_to_data(*args, **kwargs)
def _arguments_to_data(self, *args: Any, **kwargs: Any) -> np.ndarray:
raise RuntimeError(f"arguments_to_data is not defined for {self.__class__.__name__}")
def data_to_arguments(self, data: ArrayLike, deterministic: bool = False) -> ArgsKwargs:
"""Converts data to arguments
Parameters
----------
data: ArrayLike (list/tuple of floats, np.ndarray)
the data in the optimization space
deterministic: bool
whether the conversion should be deterministic (some variables can be stochastic, if deterministic=True
the most likely output will be used)
Returns
-------
args: Tuple[Any]
the positional arguments corresponding to the instance initialization positional arguments
kwargs: Dict[str, Any]
the keyword arguments corresponding to the instance initialization keyword arguments
"""
        # trigger random_state creation (may need to be propagated to sub-variables)
assert self.random_state is not None
array = np.array(data, copy=False)
if array.shape != (self.dimension,):
raise ValueError(f"Unexpected shape {array.shape} of {array} for {self} with dimension {self.dimension}")
return self._data_to_arguments(array, deterministic)
def _data_to_arguments(self, data: np.ndarray, deterministic: bool) -> ArgsKwargs:
raise NotImplementedError
def get_summary(self, data: ArrayLike) -> str: # pylint: disable=unused-argument
output = self.data_to_arguments(np.array(data, copy=False), deterministic=True)
return f"Value {output[0][0]}, from data: {data}"
# compatibility
@property
def value(self) -> tp.Any:
if self._value is None:
self._value = self.data_to_arguments(self.data)
return self._value[0][0]
@value.setter
def value(self, value: tp.Any) -> None:
self._value = (value,), {}
self._data = self.arguments_to_data(value)
@property
def args(self) -> tp.Tuple[tp.Any, ...]:
return (self.value,)
@property
def kwargs(self) -> tp.Dict[str, tp.Any]:
return {}
def _internal_get_standardized_data(self: T, reference: T) -> np.ndarray: # pylint: disable=unused-argument
return self.data - reference.data # type: ignore
def _internal_set_standardized_data(self: T, data: np.ndarray, reference: T, deterministic: bool = False) -> None:
self._data = data + reference.data
self._value = self.data_to_arguments(self.data, deterministic=deterministic)
def _internal_spawn_child(self: T) -> T:
child = copy.deepcopy(self)
child._frozen = False
child.uid = uuid.uuid4().hex
child.parents_uids = []
return child
def _compute_descriptors(self) -> Descriptors:
return Descriptors(continuous=self.continuous, deterministic=not self.noisy)
def mutate(self) -> None:
raise p.NotSupportedError("Please port your code to new parametrization")
def recombine(self: T, *others: T) -> None: # type: ignore
raise p.NotSupportedError("Please port your code to new parametrization")
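# Hedged illustration (not part of nevergrad): a minimal concrete subclass sketch
# showing how the abstract hooks above fit together, assuming a one-dimensional
# variable whose data value is passed straight through. The class and names below
# are invented purely for demonstration.
#
# class _PassthroughVariable(Variable):
#     def __init__(self) -> None:
#         super().__init__()
#         self._specs.update(dimension=1)
#
#     def _arguments_to_data(self, value: float) -> np.ndarray:
#         return np.array([value])
#
#     def _data_to_arguments(self, data: np.ndarray, deterministic: bool) -> ArgsKwargs:
#         return (float(data[0]),), {}
#
# var = _PassthroughVariable()
# args, kwargs = var.data_to_arguments([0.5])   # -> (0.5,), {}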
| 35.842105 | 118 | 0.640822 |
79440e72a7f66fc79afbb6da8c13ae85e03ef714 | 1,108 | py | Python | examples/html-test/app.py | anokidev/hasta | 570f3e35197c3370a0c8f7cc77d81640c8022165 | [
"MIT"
] | 1 | 2022-01-01T18:38:57.000Z | 2022-01-01T18:38:57.000Z | examples/html-test/app.py | anokidev/hasta | 570f3e35197c3370a0c8f7cc77d81640c8022165 | [
"MIT"
] | 8 | 2021-12-31T10:21:42.000Z | 2022-01-02T16:57:14.000Z | examples/html-test/app.py | anokidev/hasta | 570f3e35197c3370a0c8f7cc77d81640c8022165 | [
"MIT"
] | null | null | null | '''
    This file is used for examples only.
    In this case, it defines a class with two
    methods, __init__ and html_application.
    When the class is instantiated, __init__
    is called, which reads the content of
    index.html, located inside the html
    folder. The result is stored in
    self.response_body_html.
    html_application encodes that content
    into bytes, which are decoded back to
    utf-8 when returned.
    html_application acts as the app function,
    i.e. the web framework / Python application.
'''
from hasta.http.init import Init
class Apps:
def __init__(self, path):
f = open(path)
self.response_body_html = f.read()
def html_application(self, start_response, environ):
response_body = self.response_body_html.encode()
status = "200 OK"
response_headers = [ ('Content-type', 'text/html') ]
start_response(status, response_headers)
return response_body.decode("utf-8")
apps = Apps('./html/index.html')
run = Init()
run('0.0.0.0', 8443, app = apps.html_application)
| 22.16 | 60 | 0.665162 |
79440e73ef82d04f6c616dae71c9771e1cbeedd2 | 4,392 | py | Python | CIFAR10_evaluation/lbp/linear_classifier.py | ncos/hometasks | 9504ef7ed8fe30b5bc78ca1e423a2b85e46734a1 | [
"MIT"
] | 3 | 2019-02-19T21:21:02.000Z | 2020-01-30T19:49:01.000Z | CIFAR10_evaluation/lbp/linear_classifier.py | ncos/hometasks | 9504ef7ed8fe30b5bc78ca1e423a2b85e46734a1 | [
"MIT"
] | null | null | null | CIFAR10_evaluation/lbp/linear_classifier.py | ncos/hometasks | 9504ef7ed8fe30b5bc78ca1e423a2b85e46734a1 | [
"MIT"
] | null | null | null | import numpy as np
from loss_grad_logistic import *
from loss_grad_softmax import *
from loss_grad_svm import *
class LinearClassifier:
def __init__(self):
self.W = None # set up the weight matrix
def train(self, X, y, method='sgd', batch_size=200, learning_rate=1e-4,
reg = 1e3, num_iters=1000, verbose=False, vectorized=True):
"""
Train linear classifer using batch gradient descent or stochastic gradient descent
Parameters
----------
X: (D x N) array of training data, each column is a training sample with D-dimension.
y: (N, ) 1-dimension array of target data with length N.
method: (string) determine whether using 'bgd' or 'sgd'.
batch_size: (integer) number of training examples to use at each step.
learning_rate: (float) learning rate for optimization.
reg: (float) regularization strength for optimization.
num_iters: (integer) number of steps to take when optimization.
verbose: (boolean) if True, print out the progress (loss) when optimization.
Returns
-------
losses_history: (list) of losses at each training iteration
"""
dim, num_train = X.shape
num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
if self.W is None:
# initialize the weights with small values
if num_classes == 2: # just need weights for one class
self.W = np.random.randn(1, dim) * 0.001
            else: # weights for each class
self.W = np.random.randn(num_classes, dim) * 0.001
losses_history = []
for i in xrange(num_iters):
if method == 'bgd':
loss, grad = self.loss_grad(X, y, reg, vectorized)
else:
                # randomly choose a mini-batch of samples
idxs = np.random.choice(num_train, batch_size, replace=False)
loss, grad = self.loss_grad(X[:, idxs], y[idxs], reg, vectorized) # grad => [K x D]
losses_history.append(loss)
# update weights
self.W -= learning_rate * grad # [K x D]
# print self.W
# print 'dsfad', grad.shape
if verbose and (i % 100 == 0):
print 'iteration %d/%d: loss %f' % (i, num_iters, loss)
return losses_history
def predict(self, X):
"""
Predict value of y using trained weights
Parameters
----------
X: (D x N) array of data, each column is a sample with D-dimension.
Returns
-------
        pred_ys: (N, ) 1-dimension array of y for N samples
h_x_mat: Normalized scores
"""
pred_ys = np.zeros(X.shape[1])
f_x_mat = self.W.dot(X)
if self.__class__.__name__ == 'Logistic':
pred_ys = f_x_mat.squeeze() >=0
else: # multiclassification
pred_ys = np.argmax(f_x_mat, axis=0)
# normalized score
h_x_mat = 1.0 / (1.0 + np.exp(-f_x_mat)) # [1, N]
h_x_mat = h_x_mat.squeeze()
return pred_ys, h_x_mat
def loss_grad(self, X, y, reg, vectorized=True):
"""
Compute the loss and gradients.
Parameters
----------
The same as self.train()
Returns
-------
a tuple of two items (loss, grad)
loss: (float)
grad: (array) with respect to self.W
"""
pass
# Subclasses of linear classifier
class Logistic(LinearClassifier):
"""A subclass for binary classification using logistic function"""
def loss_grad(self, X, y, reg, vectorized=True):
if vectorized:
return loss_grad_logistic_vectorized(self.W, X, y, reg)
else:
return loss_grad_logistic_naive(self.W, X, y, reg)
class Softmax(LinearClassifier):
"""A subclass for multi-classicication using Softmax function"""
def loss_grad(self, X, y, reg, vectorized=True):
if vectorized:
return loss_grad_softmax_vectorized(self.W, X, y, reg)
else:
return loss_grad_softmax_naive(self.W, X, y, reg)
class SVM(LinearClassifier):
"""A subclass for multi-classicication using SVM function"""
def loss_grad(self, X, y, reg, vectorized=True):
return loss_grad_svm_vectorized(self.W, X, y, reg)
| 34.3125 | 99 | 0.589253 |
79440e877996a9620501cd2f769d437ce9c60c44 | 5,950 | py | Python | nintendo/nex/service.py | Cuyler36/NintendoClients | d38986674ecc4dec624694649361f1f334901020 | [
"MIT"
] | null | null | null | nintendo/nex/service.py | Cuyler36/NintendoClients | d38986674ecc4dec624694649361f1f334901020 | [
"MIT"
] | null | null | null | nintendo/nex/service.py | Cuyler36/NintendoClients | d38986674ecc4dec624694649361f1f334901020 | [
"MIT"
] | null | null | null |
from nintendo.common import scheduler
from nintendo.nex import prudp, streams, kerberos, common
import random
import struct
import time
import logging
logger = logging.getLogger(__name__)
class RMCClient:
def __init__(self, settings, sock=None):
self.settings = settings
self.sock = sock
if not self.sock:
self.sock = prudp.RVSecureClient(settings)
if not isinstance(self.sock, prudp.RVSecureClient):
raise TypeError("RMC protocol must lie on top of RVSecure client")
self.servers = {}
self.pid = None
self.call_id = 0
self.responses = {}
self.socket_event = None
def register_server(self, server):
if server.PROTOCOL_ID in self.servers:
raise ValueError("Server with protocol id 0x%X already exists" %server.PROTOCOL_ID)
self.servers[server.PROTOCOL_ID] = server
def connect(self, host, port, stream_id, ticket=None):
if not self.sock.connect(host, port, stream_id, ticket):
return False
self.socket_event = scheduler.add_socket(self.handle_recv, self.sock)
return True
def accept(self):
if self.sock.server_ticket:
self.pid = self.sock.server_ticket.source_pid
self.socket_event = scheduler.add_socket(self.handle_recv, self.sock)
return True
def close(self):
if self.socket_event:
scheduler.remove(self.socket_event)
self.sock.close()
def stream_id(self): return self.sock.local_port
def is_connected(self): return self.sock.is_connected()
def local_address(self): return self.sock.local_address()
def remote_address(self): return self.sock.remote_address()
def handle_recv(self, data):
if not data:
logger.debug("Connection was closed")
scheduler.remove(self.socket_event)
return
stream = streams.StreamIn(data, self.settings)
length = stream.u32()
protocol_id = stream.u8()
if protocol_id & 0x80:
self.handle_request(protocol_id & 0x7F, stream)
else:
self.handle_response(protocol_id, stream)
def init_request(self, protocol_id, method_id):
self.call_id += 1
stream = streams.StreamOut(self.settings)
stream.u8(protocol_id | 0x80)
stream.u32(self.call_id)
stream.u32(method_id)
return stream, self.call_id
def init_response(self, protocol_id, call_id, method_id, error=None):
stream = streams.StreamOut(self.settings)
stream.u8(protocol_id)
if error:
stream.u8(0)
stream.result(error)
stream.u32(call_id)
else:
stream.u8(1)
stream.u32(call_id)
stream.u32(method_id | 0x8000)
return stream
def send_message(self, stream):
if self.sock.is_connected():
self.sock.send(struct.pack("I", len(stream.get())) + stream.get())
else:
raise RuntimeError("Can't send message on disconnected service client")
def handle_request(self, protocol_id, stream):
call_id = stream.u32()
method_id = stream.u32()
logger.debug("Received RMC request: protocol=%i, call=%i, method=%i", protocol_id, call_id, method_id)
if protocol_id in self.servers:
response = self.init_response(protocol_id, call_id, method_id)
try:
result = self.servers[protocol_id].handle(self.pid, method_id, stream, response)
except common.RMCError as e:
logger.info("RMC failed with error 0x%08X (%s)" %(e.error_code, e.error_name))
result = common.Result(e.error_code)
except Exception as e:
logger.error("Exception occurred while handling method call")
import traceback
traceback.print_exc()
if isinstance(e, TypeError): result = common.Result("PythonCore::TypeError")
elif isinstance(e, IndexError): result = common.Result("PythonCore::IndexError")
elif isinstance(e, MemoryError): result = common.Result("PythonCore::MemoryError")
elif isinstance(e, KeyError): result = common.Result("PythonCore::KeyError")
else: result = common.Result("PythonCore::Exception")
else:
logger.warning("Received RMC request with unsupported protocol id: 0x%X", protocol_id)
result = common.Result("Core::NotImplemented")
if result and result.is_error():
response = self.init_response(protocol_id, call_id, method_id, result)
self.send_message(response)
def handle_response(self, protocol_id, stream):
success = stream.u8()
if not success:
result = stream.result()
call_id = stream.u32()
logger.warning("RMC failed with error code 0x%08X", result.code())
self.responses[call_id] = (result, None)
else:
call_id = stream.u32()
method_id = stream.u32() & 0x7FFF
logger.debug("Received RMC response: protocol=%i, call=%i, method=%i", protocol_id, call_id, method_id)
self.responses[call_id] = (None, stream)
def get_response(self, call_id, timeout=5):
start = time.monotonic()
while call_id not in self.responses:
if not self.sock.is_connected():
raise ConnectionError("RMC failed because the PRUDP connection was closed")
scheduler.update()
now = time.monotonic()
if now - start >= timeout:
raise RuntimeError("RMC request timed out")
result, stream = self.responses.pop(call_id)
if result:
result.raise_if_error()
return stream
class RMCServer:
def __init__(self, settings, server=None):
self.settings = settings
self.server = server
if not self.server:
self.server = prudp.RVSecureServer(settings)
if not isinstance(self.server, prudp.RVSecureServer):
raise TypeError("RMC server must lie on top of RVSecure server")
self.protocols = {}
def register_protocol(self, protocol):
if protocol.PROTOCOL_ID in self.protocols:
raise ValueError("Server protocol with id 0x%X already exists" %protocol.PROTOCOL_ID)
self.protocols[protocol.PROTOCOL_ID] = protocol
def start(self, host, port, stream_id=1, key=None):
self.server.start(host, port, stream_id, key)
scheduler.add_server(self.handle, self.server)
def handle(self, socket):
client = RMCClient(self.settings, socket)
if client.accept():
for protocol in self.protocols.values():
client.register_server(protocol)
| 30.670103 | 106 | 0.723361 |
79440f0342f24e95effb95e8b92cefa04f0d2503 | 4,259 | py | Python | test/functional/rpc_net.py | nhoussay/groom | 87e69a80c1e15e4683ef64c55ac65547910b1c97 | [
"MIT"
] | null | null | null | test/functional/rpc_net.py | nhoussay/groom | 87e69a80c1e15e4683ef64c55ac65547910b1c97 | [
"MIT"
] | null | null | null | test/functional/rpc_net.py | nhoussay/groom | 87e69a80c1e15e4683ef64c55ac65547910b1c97 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from test_framework.test_framework import groomTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_raises_rpc_error,
connect_nodes,
disconnect_nodes,
p2p_port,
wait_until,
)
class NetTest(groomTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
def run_test(self):
self.log.info("Connect nodes both way")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
self._test_connection_count()
self._test_getnettotals()
self._test_getnetworkinginfo()
self._test_getaddednodeinfo()
#self._test_getpeerinfo()
def _test_connection_count(self):
assert_equal(self.nodes[0].getconnectioncount(), 2)
def _test_getnettotals(self):
# getnettotals totalbytesrecv and totalbytessent should be
# consistent with getpeerinfo. Since the RPC calls are not atomic,
# and messages might have been recvd or sent between RPC calls, call
# getnettotals before and after and verify that the returned values
# from getpeerinfo are bounded by those values.
net_totals_before = self.nodes[0].getnettotals()
peer_info = self.nodes[0].getpeerinfo()
net_totals_after = self.nodes[0].getnettotals()
assert_equal(len(peer_info), 2)
peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
peers_sent = sum([peer['bytessent'] for peer in peer_info])
assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
# test getnettotals and getpeerinfo by doing a ping
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
def _test_getnetworkinginfo(self):
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
disconnect_nodes(self.nodes[0], 1)
# Wait a bit for all sockets to close
wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
self.log.info("Connect nodes both way")
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[1], 0)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
def _test_getaddednodeinfo(self):
assert_equal(self.nodes[0].getaddednodeinfo(True), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(ip_port, 'add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(True, ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existent node returns an error
assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, True, '1.1.1.1')
def _test_getpeerinfo(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
if __name__ == '__main__':
NetTest().main()
| 42.59 | 134 | 0.687955 |
79440fb40a5e2e678b5cd229cb1435717c866b01 | 8,886 | py | Python | src/PlugIns/PE/CertficatePlug.py | codexgigassys/codex-backend | 1e6b946f05e2fd9ce80b207046fe168f49cc2db2 | [
"MIT"
] | 161 | 2016-08-03T14:25:20.000Z | 2021-08-11T03:18:35.000Z | src/PlugIns/PE/CertficatePlug.py | codexgigassys/codex-backend | 1e6b946f05e2fd9ce80b207046fe168f49cc2db2 | [
"MIT"
] | 115 | 2016-08-08T09:24:08.000Z | 2020-03-07T06:48:02.000Z | src/PlugIns/PE/CertficatePlug.py | codexgigassys/codex-backend | 1e6b946f05e2fd9ce80b207046fe168f49cc2db2 | [
"MIT"
] | 46 | 2016-08-04T08:25:28.000Z | 2021-09-05T23:46:55.000Z | # Copyright (C) 2016 Deloitte Argentina.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
import pathmagic
from Sample import Sample
from subprocess import check_output
import binascii
from PlugIns.PlugIn import PlugIn
from Modules.PEFileModule import PEFileModule
import pefile
from pyasn1.codec.der import encoder, decoder
from pyasn1_modules import rfc2315, rfc2459  # rfc2459 is needed by process2() below
import logging
class CertficatePlug(PlugIn):
def __init__(self, sample=None):
PlugIn.__init__(self, sample)
def getPath(self):
return "particular_header.certificate"
def getName(self):
return "certificate"
def getVersion(self):
return 2
def process(self):
raw_certificate_file = "./certPlug.temp"
pelib = self._getLibrary(PEFileModule().getName())
if(pelib is None):
return ""
# reset the offset to the table containing the signature
sigoff = 0
        # reset the length of the table
siglen = 0
# search for the 'IMAGE_DIRECTORY_ENTRY_SECURITY' directory
# probably there is a direct way to find that directory
# but I am not aware of it at the moment
found = False
for s in pelib.__structures__:
if s.name == 'IMAGE_DIRECTORY_ENTRY_SECURITY':
# set the offset to the signature table
sigoff = s.VirtualAddress
# set the length of the table
siglen = s.Size
# print(siglen)
if (siglen >= 8):
found = True
if not found:
return {}
bin_data = self.sample.getBinary()
totsize = len(bin_data)
if sigoff < totsize:
# hmmm, okay we could possibly read this from the PE object
# but is straightforward to just open the file again
# as a file object
# f = open(a,'rb')
# move to the beginning of signature table
# f.seek(sigoff)
# read the signature table
# thesig = f.read(siglen)
thesig = bin_data[sigoff:(sigoff + siglen)]
# close the file
# f.close()
# now the 'thesig' variable should contain the table with
# the following structure
# DWORD dwLength - this is the length of bCertificate
# WORD wRevision
# WORD wCertificateType
# BYTE bCertificate[dwLength] - this contains the PKCS7 signature
# with all the
            # let's dump only the PKCS7 signature (without checking the length
# with dwLength)
res = {}
length_raw = thesig[:4]
revision_raw = thesig[4:6]
type_raw = thesig[6:8]
raw_certificate = thesig[8:]
res["length"] = int(binascii.hexlify(length_raw), 16)
res["revision"] = int(binascii.hexlify(revision_raw), 16)
res["type"] = int(binascii.hexlify(type_raw), 16)
res["signed"] = True
            fd = open(raw_certificate_file, "wb")  # write the raw certificate bytes in binary mode
fd.write(raw_certificate)
fd.close()
cmd = ["openssl", "pkcs7", "-inform", "DER",
"-print_certs", "-text", "-in", raw_certificate_file]
try:
output = check_output(cmd)
except Exception, e:
logging.exception("Plugins: Cert")
return {}
# print(output)
certificates = []
one_cert = {}
iterator = iter(output.split('\n'))
while True:
try:
actual = iterator.next().strip()
# print(actual)
except Exception, e:
break
if(actual.find("Certificate:") == 0):
if(len(one_cert) > 0):
certificates.append(one_cert)
one_cert = {}
elif(actual.find("Serial Number:") == 0):
if(len(actual) > len("Serial Number:") + 2):
hasta = actual.find("(")
serial = actual[len("Serial Number:"):hasta]
serial = serial.strip()
else:
serial = iterator.next().strip()
# print("##%s##"%serial)
one_cert["serial"] = serial.lower()
elif(actual.find("Issuer:") == 0):
s_pos = actual.find(", O=")
if(s_pos >= 0):
f_pos = actual.find(',', s_pos + 1)
if(f_pos < 0):
f_pos = None
issuer_o = actual[s_pos + 4:f_pos]
else:
issuer_o = ""
one_cert["issuer"] = issuer_o.lower()
elif(actual.find("Validity") == 0):
val_in = iterator.next().strip()
val_fin = iterator.next().strip()
one_cert["validity_beg"] = val_in[12:].lower()
one_cert["validity_end"] = val_fin[12:].lower()
elif(actual.find("Subject:") == 0):
s_pos = actual.find(", O=")
if(s_pos >= 0):
f_pos = actual.find(',', s_pos + 1)
if(f_pos < 0):
f_pos = None
subject_o = actual[s_pos + 4:f_pos]
else:
subject_o = ""
one_cert["subject"] = subject_o.lower()
if(len(one_cert) > 0):
certificates.append(one_cert)
res["certificates"] = certificates
return res
else:
return {}
def process2(self):
pe = self._getLibrary(PEFileModule().getName())
if(pe is None):
return ""
# get the security directory entry
address = pe.OPTIONAL_HEADER.DATA_DIRECTORY[
pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']].VirtualAddress
if address > 0:
# Always in DER format AFAIK
derData = pe.write()[address + 8:]
else:
logging.debug("address 0")
return
(contentInfo, rest) = decoder.decode(
derData, asn1Spec=rfc2315.ContentInfo())
contentType = contentInfo.getComponentByName('contentType')
if contentType == rfc2315.signedData:
            signedData = decoder.decode(contentInfo.getComponentByName(
'content'), asn1Spec=rfc2315.SignedData())
for sd in signedData:
if sd == '':
continue
signerInfos = sd.getComponentByName('signerInfos')
for si in signerInfos:
issuerAndSerial = si.getComponentByName(
'issuerAndSerialNumber')
issuer = issuerAndSerial.getComponentByName(
'issuer').getComponent()
for i in issuer:
for r in i:
at = r.getComponentByName('type')
if rfc2459.id_at_countryName == at:
                                cn = decoder.decode(r.getComponentByName(
'value'), asn1Spec=rfc2459.X520countryName())
print(cn[0])
elif rfc2459.id_at_organizationName == at:
                                on = decoder.decode(r.getComponentByName(
'value'), asn1Spec=rfc2459.X520OrganizationName())
print(on[0].getComponent())
elif rfc2459.id_at_organizationalUnitName == at:
                                ou = decoder.decode(r.getComponentByName(
'value'), asn1Spec=rfc2459.X520OrganizationalUnitName())
print(ou[0].getComponent())
elif rfc2459.id_at_commonName == at:
                                cn = decoder.decode(r.getComponentByName(
'value'), asn1Spec=rfc2459.X520CommonName())
print(cn[0].getComponent())
else:
                                print(at)
if __name__ == "__main__":
data = open(source_path + "/Test_files/certificate5.codex", "rb").read()
sample = Sample()
sample.setBinary(data)
modules = {}
pfm = PEFileModule()
modules[pfm.getName()] = pfm
plug = CertficatePlug()
plug.setModules(modules)
plug.setSample(sample)
res = plug.process()
print(res)
# fd=open("certficado.out","wb")
# fd.write(res["bCertificate"])
# fd.close()
| 37.179916 | 88 | 0.497524 |
79440fdf9b15ff0d7a89ce5bc2174b8549ddee52 | 61,336 | py | Python | src/policy_makers/ipmi_over_lan_policy_maker/ipmi_over_lan_policy_maker.py | ugo-emekauwa/cisco-imm-automation-tools | 67b1e752c32ca08b1acb928e68b697b60a62836f | [
"Apache-2.0"
] | null | null | null | src/policy_makers/ipmi_over_lan_policy_maker/ipmi_over_lan_policy_maker.py | ugo-emekauwa/cisco-imm-automation-tools | 67b1e752c32ca08b1acb928e68b697b60a62836f | [
"Apache-2.0"
] | null | null | null | src/policy_makers/ipmi_over_lan_policy_maker/ipmi_over_lan_policy_maker.py | ugo-emekauwa/cisco-imm-automation-tools | 67b1e752c32ca08b1acb928e68b697b60a62836f | [
"Apache-2.0"
] | 1 | 2022-01-07T11:03:13.000Z | 2022-01-07T11:03:13.000Z | """
IPMI Over LAN Policy Maker for Cisco Intersight, v2.0
Author: Ugo Emekauwa
Contact: [email protected], [email protected]
Summary: The IPMI Over LAN Policy Maker for Cisco Intersight automates the
creation of IPMI Over LAN Policies.
GitHub Repository: https://github.com/ugo-emekauwa/cisco-imm-automation-tools
"""
########################
# MODULE REQUIREMENT 1 #
########################
"""
For the following variable below named key_id, please fill in between
the quotes your Intersight API Key ID.
Here is an example:
key_id = "5c89885075646127773ec143/5c82fc477577712d3088eb2f/5c8987b17577712d302eaaff"
"""
key_id = ""
########################
# MODULE REQUIREMENT 2 #
########################
"""
For the following variable below named key, please fill in between
the quotes your system's file path to your Intersight API key "SecretKey.txt"
file.
Here is an example:
key = "C:\\Users\\demouser\\Documents\\SecretKey.txt"
"""
key = ""
########################
# MODULE REQUIREMENT 3 #
########################
"""
Provide the required configuration settings to create the
IPMI Over LAN Policy on Cisco Intersight. Remove the sample
values and replace them with your own, where applicable.
"""
####### Start Configuration Settings - Provide values for the variables listed below. #######
# General Settings
ipmi_over_lan_policy_name = "IPMI-Over-LAN-Policy-1"
ipmi_over_lan_policy_description = "A Cisco Intersight IPMI Over LAN Policy generated by the IPMI Over LAN Policy Maker."
ipmi_over_lan_policy_organization = "default"
ipmi_over_lan_policy_tags = {"Org": "IT", "Dept": "DevOps"} # Empty the ipmi_over_lan_policy_tags dictionary if no tags are needed, for example: ipmi_over_lan_policy_tags = {}
# Policy Detail Settings
enable_ipmi_over_lan = True
privilege_level = "admin" # Options: "admin", "user", and "read-only"
encryption_key = ""
# Intersight Base URL Setting (Change only if using the Intersight Virtual Appliance)
intersight_base_url = "https://www.intersight.com/api/v1"
# UCS Server Profile Attachment Settings
ucs_server_profile_name = ""
####### Finish Configuration Settings - The required value entries are complete. #######
#############################################################################################################################
#############################################################################################################################
import sys
import traceback
import json
import copy
import intersight
import re
# Function to get Intersight API client as specified in the Intersight Python SDK documentation for OpenAPI 3.x
## Modified to align with overall formatting and try/except blocks added for additional error handling
def get_api_client(api_key_id,
api_secret_file,
endpoint="https://intersight.com"
):
try:
with open(api_secret_file, 'r') as f:
api_key = f.read()
if re.search('BEGIN RSA PRIVATE KEY', api_key):
# API Key v2 format
signing_algorithm = intersight.signing.ALGORITHM_RSASSA_PKCS1v15
signing_scheme = intersight.signing.SCHEME_RSA_SHA256
hash_algorithm = intersight.signing.HASH_SHA256
elif re.search('BEGIN EC PRIVATE KEY', api_key):
# API Key v3 format
signing_algorithm = intersight.signing.ALGORITHM_ECDSA_MODE_DETERMINISTIC_RFC6979
signing_scheme = intersight.signing.SCHEME_HS2019
hash_algorithm = intersight.signing.HASH_SHA256
configuration = intersight.Configuration(
host=endpoint,
signing_info=intersight.signing.HttpSigningConfiguration(
key_id=api_key_id,
private_key_path=api_secret_file,
signing_scheme=signing_scheme,
signing_algorithm=signing_algorithm,
hash_algorithm=hash_algorithm,
signed_headers=[
intersight.signing.HEADER_REQUEST_TARGET,
intersight.signing.HEADER_HOST,
intersight.signing.HEADER_DATE,
intersight.signing.HEADER_DIGEST,
]
)
)
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API Key.")
print("Exiting due to the Intersight API Key being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
return intersight.ApiClient(configuration)
# Establish function to test for the availability of the Intersight API and Intersight account
def test_intersight_api_service(intersight_api_key_id,
intersight_api_key,
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to test the availability of the Intersight API and
Intersight account. The tested Intersight account contains the user who is
the owner of the provided Intersight API Key and Key ID.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_base_url (str):
            Optional; The base URL for Intersight API paths. The default value
            is "https://www.intersight.com/api/v1". This value typically only
            needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A string of the name for the Intersight account tested, verifying the
Intersight API service is up and the Intersight account is accessible.
Raises:
Exception:
An exception occurred due to an issue with the provided API Key
and/or API Key ID.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
try:
# Check that Intersight Account is accessible
print("Testing access to the Intersight API by verifying the "
"Intersight account information...")
api_client.call_api(resource_path="/iam/Accounts",
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
iam_account = json.loads(response)
if api_client.last_response.status != 200:
print("\nThe Intersight API and Account Availability Test did not "
"pass.")
print("The Intersight account information could not be verified.")
print("Exiting due to the Intersight account being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
else:
intersight_account_name = iam_account["Results"][0]["Name"]
print("The Intersight API and Account Availability Test has "
"passed.\n")
print(f"The Intersight account named '{intersight_account_name}' "
"has been found.")
return intersight_account_name
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API.")
print("Exiting due to the Intersight API being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
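# Hedged usage sketch (illustration only): with the key_id and key variables
# filled in near the top of this script, the availability test above could be
# exercised on its own roughly as shown below. The account_name variable is an
# assumption for demonstration and is not used elsewhere in this tool.
#
#   account_name = test_intersight_api_service(
#       intersight_api_key_id=key_id,
#       intersight_api_key=key,
#       intersight_base_url=intersight_base_url
#       )
#   print(f"Verified access to the Intersight account named '{account_name}'.")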
# Establish function to retrieve the MOID of a specific Intersight API object by name
def intersight_object_moid_retriever(intersight_api_key_id,
intersight_api_key,
object_name,
intersight_api_path,
object_type="object",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to retrieve the MOID of Intersight objects
using the Intersight API.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
object_name (str):
The name of the Intersight object.
intersight_api_path (str):
The Intersight API path of the Intersight object.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
organization (str):
Optional; The Intersight organization of the Intersight object.
The default value is "default".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A string of the MOID for the provided Intersight object.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
try:
# Retrieve the Intersight Account name
api_client.call_api(resource_path="/iam/Accounts",
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
iam_account = json.loads(response)
if api_client.last_response.status != 200:
print("The provided Intersight account information could not be "
"accessed.")
print("Exiting due to the Intersight account being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
else:
intersight_account_name = iam_account["Results"][0]["Name"]
except Exception:
print("\nA configuration error has occurred!\n")
print("Unable to access the Intersight API.")
print("Exiting due to the Intersight API being unavailable.\n")
print("Please verify that the correct API Key ID and API Key have "
"been entered, then re-attempt execution.\n")
sys.exit(0)
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
auth_settings=['cookieAuth', 'http_signature', 'oAuth2', 'oAuth2']
)
response = api_client.last_response.data
intersight_objects = json.loads(response)
# The Intersight API resource path has been accessed successfully.
except Exception:
print("\nA configuration error has occurred!\n")
print("There was an issue retrieving the "
f"{object_type} from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
if intersight_objects.get("Results"):
for intersight_object in intersight_objects.get("Results"):
if intersight_object.get("Organization"):
provided_organization_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=organization,
intersight_api_path="organization/Organizations",
object_type="Organization",
preconfigured_api_client=api_client
)
if intersight_object.get("Organization", {}).get("Moid") == provided_organization_moid:
if intersight_object.get("Name") == object_name:
intersight_object_moid = intersight_object.get("Moid")
# The provided object and MOID has been identified and retrieved.
return intersight_object_moid
else:
if intersight_object.get("Name") == object_name:
intersight_object_moid = intersight_object.get("Moid")
# The provided object and MOID has been identified and retrieved.
return intersight_object_moid
else:
print("\nA configuration error has occurred!\n")
print(f"The provided {object_type} named '{object_name}' was not "
"found.")
print("Please check the Intersight Account named "
f"{intersight_account_name}.")
print("Verify through the API or GUI that the needed "
f"{object_type} is present.")
print(f"If the needed {object_type} is missing, please create it.")
print("Once the issue has been resolved, re-attempt execution.\n")
sys.exit(0)
else:
print("\nA configuration error has occurred!\n")
print(f"The provided {object_type} named '{object_name}' was not "
"found.")
print(f"No requested {object_type} instance is currently available in "
f"the Intersight account named {intersight_account_name}.")
print("Please check the Intersight Account named "
f"{intersight_account_name}.")
print(f"Verify through the API or GUI that the needed {object_type} "
"is present.")
print(f"If the needed {object_type} is missing, please create it.")
print("Once the issue has been resolved, re-attempt execution.\n")
sys.exit(0)
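# Hedged usage sketch (illustration only): looking up the MOID of the target
# organization configured above might look like the call below; the org_moid
# variable name is an assumption for demonstration.
#
#   org_moid = intersight_object_moid_retriever(
#       intersight_api_key_id=key_id,
#       intersight_api_key=key,
#       object_name=ipmi_over_lan_policy_organization,
#       intersight_api_path="organization/Organizations",
#       object_type="Organization",
#       intersight_base_url=intersight_base_url
#       )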
# Establish function to retrieve all instances of a particular Intersight API object type
def get_intersight_objects(intersight_api_key_id,
intersight_api_key,
intersight_api_path,
object_type="object",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to perform an HTTP GET on all objects under an
available Intersight API type.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_api_path (str):
The path to the targeted Intersight API object type. For example,
to specify the Intersight API type for adapter configuration
policies, enter "adapter/ConfigPolicies". More API types can be
found in the Intersight API reference library at
https://intersight.com/apidocs/introduction/overview/.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
A dictionary containing all objects of the specified API type. If the
API type is inaccessible, an implicit value of None will be returned.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
                            auth_settings=['cookieAuth', 'http_signature', 'oAuth2']
)
response = api_client.last_response.data
intersight_objects = json.loads(response)
# The Intersight API resource path has been accessed successfully.
return intersight_objects
except Exception:
print("\nA configuration error has occurred!\n")
print(f"There was an issue retrieving the requested {object_type} "
"instances from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
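# A minimal usage sketch for get_intersight_objects(). The API client name
# "example_api_client" and the Boot Policy API path are illustrative
# assumptions, not values required by this script:
#
#     retrieved_boot_policies = get_intersight_objects(
#         intersight_api_key_id=None,
#         intersight_api_key=None,
#         intersight_api_path="boot/PrecisionPolicies",
#         object_type="Boot Policy",
#         preconfigured_api_client=example_api_client
#         )
#     for boot_policy in retrieved_boot_policies.get("Results", []):
#         print(boot_policy.get("Name"))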
# Establish function to retrieve a particular instance of a particular Intersight API object type
def get_single_intersight_object(intersight_api_key_id,
intersight_api_key,
intersight_api_path,
object_moid,
object_type="object",
intersight_base_url="https://www.intersight.com/api/v1",
preconfigured_api_client=None
):
"""This is a function to perform an HTTP GET on a single object under an
available Intersight API type.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
intersight_api_path (str):
The path to the targeted Intersight API object type. For example,
to specify the Intersight API type for adapter configuration
policies, enter "adapter/ConfigPolicies". More API types can be
found in the Intersight API reference library at
https://intersight.com/apidocs/introduction/overview/.
object_moid (str):
The MOID of the single Intersight object.
object_type (str):
Optional; The type of Intersight object. The default value is
"object".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
Returns:
        A dictionary containing the requested object instance of the specified
        API type. If the object is inaccessible, an implicit value of None
        will be returned.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight API
path. The status code or error message will be specified.
"""
# Define Intersight SDK ApiClient variable
if preconfigured_api_client is None:
api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
api_client = preconfigured_api_client
# Retrieving the provided object from Intersight...
full_intersight_api_path = f"/{intersight_api_path}/{object_moid}"
try:
api_client.call_api(resource_path=full_intersight_api_path,
method="GET",
                            auth_settings=['cookieAuth', 'http_signature', 'oAuth2']
)
response = api_client.last_response.data
single_intersight_object = json.loads(response)
# The Intersight API resource path has been accessed successfully.
return single_intersight_object
except Exception:
print("\nA configuration error has occurred!\n")
print(f"There was an issue retrieving the requested {object_type} "
"instance from Intersight.")
print("Unable to access the provided Intersight API resource path "
f"'{intersight_api_path}'.")
print("Please review and resolve any error messages, then re-attempt "
"execution.\n")
print("Exception Message: ")
traceback.print_exc()
sys.exit(0)
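# A minimal usage sketch for get_single_intersight_object(). The MOID string
# and "example_api_client" are placeholders assumed for illustration only:
#
#     retrieved_organization = get_single_intersight_object(
#         intersight_api_key_id=None,
#         intersight_api_key=None,
#         intersight_api_path="organization/Organizations",
#         object_moid="012345678901234567890123",
#         object_type="Organization",
#         preconfigured_api_client=example_api_client
#         )
#     print(retrieved_organization.get("Name"))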
# Establish Maker specific classes and functions
class UcsPolicy:
"""This class is used to configure a UCS Policy in Intersight.
"""
object_type = "UCS Policy"
intersight_api_path = None
subobject_types = None
subobject_attribute_maps = None
object_variable_value_maps = None
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None
):
self.intersight_api_key_id = intersight_api_key_id
self.intersight_api_key = intersight_api_key
self.policy_name = policy_name
self.policy_description = policy_description
self.organization = organization
self.intersight_base_url = intersight_base_url
if tags is None:
self.tags = {}
else:
self.tags = tags
if preconfigured_api_client is None:
self.api_client = get_api_client(api_key_id=intersight_api_key_id,
api_secret_file=intersight_api_key,
endpoint=intersight_base_url
)
else:
self.api_client = preconfigured_api_client
self.intersight_api_body = {
"Name": self.policy_name,
"Description": self.policy_description
}
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client})"
)
def __str__(self):
return f"{self.__class__.__name__} class object for '{self.policy_name}'"
def _post_intersight_object(self):
"""This is a function to configure an Intersight object by
performing a POST through the Intersight API.
Returns:
A string with a statement indicating whether the POST method
was successful or failed.
Raises:
Exception:
An exception occurred while performing the API call.
The status code or error message will be specified.
"""
full_intersight_api_path = f"/{self.intersight_api_path}"
try:
self.api_client.call_api(resource_path=full_intersight_api_path,
method="POST",
body=self.intersight_api_body,
                                     auth_settings=['cookieAuth', 'http_signature', 'oAuth2']
)
print(f"The configuration of the base {self.object_type} "
"has completed.")
return "The POST method was successful."
except intersight.exceptions.ApiException as error:
if error.status == 409:
existing_intersight_object_name = self.intersight_api_body.get("Name", "object")
print(f"The targeted {self.object_type} appears to already "
"exist.")
print("An attempt will be made to update the pre-existing "
f"{existing_intersight_object_name}...")
try:
existing_intersight_object_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=existing_intersight_object_name,
intersight_api_path=self.intersight_api_path,
object_type=self.object_type,
preconfigured_api_client=self.api_client
)
# Update full Intersight API path with the MOID of the existing object
full_intersight_api_path_with_moid = f"/{self.intersight_api_path}/{existing_intersight_object_moid}"
self.api_client.call_api(resource_path=full_intersight_api_path_with_moid,
method="POST",
body=self.intersight_api_body,
                                         auth_settings=['cookieAuth', 'http_signature', 'oAuth2']
)
print(f"The update of the {self.object_type} has "
"completed.")
print(f"The pre-existing {existing_intersight_object_name} "
"has been updated.")
return "The POST method was successful."
except Exception:
print("\nA configuration error has occurred!\n")
print(f"Unable to update the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path_with_moid}'.\n")
print(f"The pre-existing {existing_intersight_object_name} "
"could not be updated.")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
else:
print("\nA configuration error has occurred!\n")
print(f"Unable to configure the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path}'.\n")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
except Exception:
print("\nA configuration error has occurred!\n")
print(f"Unable to configure the {self.object_type} under the "
"Intersight API resource path "
f"'{full_intersight_api_path}'.\n")
print("Exception Message: ")
traceback.print_exc()
return "The POST method failed."
def _update_api_body_general_attributes(self):
"""This function updates the Intersight API body with general
attributes for the Intersight object.
"""
# Retrieve the Intersight Organization MOID
policy_organization_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=self.organization,
intersight_api_path="organization/Organizations",
object_type="Organization",
preconfigured_api_client=self.api_client
)
# Update the API body with the Intersight Organization MOID
self.intersight_api_body["Organization"] = {"Moid": policy_organization_moid}
# Create the Intersight Tags dictionary list
tags_dictionary_list = []
if self.tags:
for key in self.tags:
tags_dictionary_list_entry = {
"Key": key,
"Value": self.tags.get(key)
}
tags_dictionary_list.append(tags_dictionary_list_entry)
# Update the API body with the Intersight Tags dictionary list
self.intersight_api_body["Tags"] = tags_dictionary_list
def _update_api_body_subobject_attributes(self):
"""This function updates the Intersight API body with individual
attributes for subobjects of the Intersight object.
Raises:
Exception:
An exception occurred while reformatting a provided value for
an attribute. The issue will likely be due to the provided
value not being in string format. Changing the value to string
format should resolve the exception.
"""
def attribute_map_handler(attribute_map_dictionary):
"""This is a function to handle attributes with a mismatch in the
Front-End Name and Back-End Name.
Args:
attribute_map_dictionary (dict):
A dictionary containing attribute data, including front-end
(GUI) to back-end (API) mapped values.
"""
# Establish default automatic insertion status of current attribute
automatic_insertion_of_attribute_value_performed = False
# Check if current attribute is mandatory
if attribute_map_dictionary.get("Mandatory"):
if not any(
attribute_name_key in
staged_subobject_dictionary for
attribute_name_key in
(attribute_map_dictionary["FrontEndName"],
attribute_map_dictionary["BackEndName"]
)
):
if attribute_map_dictionary.get("AutomaticInsertion"):
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = attribute_map_dictionary.get("AutomaticInsertionValue")
automatic_insertion_of_attribute_value_performed = True
else:
print("\nA configuration error has occurred!\n")
print("During the configuration of the "
f"{subobject_type_dictionary['Description']} "
f"settings for the {self.object_type} named "
f"{self.policy_name}, there was an issue "
"accessing the value for the "
f"{attribute_map_dictionary['Description']}.")
print("Please verify the following key exists in the "
"appropriate dictionary of the "
f"{subobject_type_dictionary['Description']} "
f"list variable for the {self.object_type}, then "
"re-attempt execution:\n")
if attribute_map_dictionary['FrontEndName']:
print(f"'{attribute_map_dictionary['FrontEndName']}'\n")
else:
print(f"'{attribute_map_dictionary['BackEndName']}'\n")
sys.exit(0)
# Check for attribute front-end name key in the dictionary
if attribute_map_dictionary["FrontEndName"] in staged_subobject_dictionary:
# If the attribute back-end name key is not present in the dictionary, insert the back-end name key with the front-end name key value
if attribute_map_dictionary["BackEndName"] not in staged_subobject_dictionary:
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = staged_subobject_dictionary.get(attribute_map_dictionary["FrontEndName"])
# Remove the front-end name key from the dictionary
staged_subobject_dictionary.pop(attribute_map_dictionary["FrontEndName"])
# Check for front-end to back-end value mapping
if attribute_map_dictionary.get("FronttoBackEndValueMaps"):
if (
attribute_map_dictionary["BackEndName"] in
staged_subobject_dictionary and
not automatic_insertion_of_attribute_value_performed
):
# Retrieve the provided attribute value
provided_attribute_value = staged_subobject_dictionary.get(attribute_map_dictionary["BackEndName"])
# Reformat the provided attribute value to lowercase and remove spaces to prevent potential format issues
try:
provided_attribute_value_reformatted = "".join(provided_attribute_value.lower().split())
except Exception:
print("\nA configuration error has occurred!\n")
print("During the configuration of the "
f"{subobject_type_dictionary['Description']} "
f"settings for the {self.object_type} named "
f"{self.policy_name}, there was an issue with "
"the value for the "
f"{attribute_map_dictionary['Description']}.")
print("The value provided was "
f"{provided_attribute_value}.")
print("Please verify that the value has been provided "
"in an accepted string format.")
print("Please review and resolve any error messages, "
"then re-attempt execution.\n")
sys.exit(0)
# Create list of known and mapped front-end to back-end values
front_to_backend_value_maps_key_list = list(attribute_map_dictionary["FronttoBackEndValueMaps"])
# Replace known and reformatted front-end value with known and mapped back-end value
if provided_attribute_value_reformatted in front_to_backend_value_maps_key_list:
provided_attribute_value_mapped = attribute_map_dictionary["FronttoBackEndValueMaps"][provided_attribute_value_reformatted]
staged_subobject_dictionary[attribute_map_dictionary["BackEndName"]] = provided_attribute_value_mapped
else:
print("\nWARNING: An unknown "
f"{attribute_map_dictionary['Description']} "
f"value of '{provided_attribute_value}' has been "
"provided for the "
f"{subobject_type_dictionary['Description']} "
"settings!")
print("An attempt will be made to configure the "
"unknown "
f"{attribute_map_dictionary['Description']} "
"value.")
print("If there is an error, please use one of the "
"following known values for the "
f"{attribute_map_dictionary['Description']}, "
"then re-attempt execution:\n")
print(*attribute_map_dictionary["FixedFrontEndValues"],
sep=", "
)
# Check for subobject types that may need configuration
if self.subobject_types:
for subobject_type_dictionary in self.subobject_types:
subobject_list = getattr(self,
subobject_type_dictionary.get("SubobjectList")
)
if subobject_list:
converted_subobject_list = []
for subobject_dictionary in subobject_list:
staged_subobject_dictionary = copy.deepcopy(subobject_dictionary)
returned_subobject_type = subobject_type_dictionary["SubobjectType"]
# Set 'ClassId' attribute
staged_subobject_dictionary["ClassId"] = returned_subobject_type
# Set 'ObjectType' attribute
staged_subobject_dictionary["ObjectType"] = returned_subobject_type
# Handle setting of attributes which have mismatched Front-End and Back-End names
for attribute_map_dictionary in self.subobject_attribute_maps.get(returned_subobject_type):
attribute_map_handler(attribute_map_dictionary)
converted_subobject_dictionary = staged_subobject_dictionary
converted_subobject_list.append(converted_subobject_dictionary)
# Update Intersight API body with the converted sub-objects list
self.intersight_api_body[subobject_type_dictionary["AttributeName"]] = converted_subobject_list
def _update_api_body_mapped_object_attributes(self):
"""This function updates the Intersight API body with individual
attributes that require mapping frontend to backend values for
compatibility with the Intersight API.
Raises:
Exception:
An exception occurred while reformatting a provided value for
an attribute. The issue will likely be due to the provided
value not being in string format. Changing the value to string
format should resolve the exception.
"""
# Check for object variables with value maps that need configuration
if self.object_variable_value_maps:
for object_variable in self.object_variable_value_maps:
# Create list of all known and accepted frontend values
all_known_and_accepted_frontend_values = (object_variable_value["FrontEndValue"]
for
object_variable_value
in
object_variable["Values"]
)
# Retrieve the user provided object variable value
provided_object_variable_value = getattr(self,
object_variable["VariableName"]
)
# Reformat the user provided object variable value to lowercase and remove spaces to prevent potential format issues
try:
reformatted_object_variable_value = "".join(provided_object_variable_value.lower().split())
except Exception:
print("\nA configuration error has occurred!\n")
print(f"During the configuration of the {self.object_type} named "
f"{self.policy_name}, there was an issue with the value "
f"provided for the {object_variable['Description']} setting.")
print(f"The value provided was {provided_object_variable_value}.")
print("To proceed, the value provided for the "
f"{object_variable['Description']} setting should be updated to "
"an accepted string format.")
print("The recommended values are the following:\n")
# Print list of all known and accepted frontend values for user
print(*all_known_and_accepted_frontend_values,
sep=", "
)
print("\nPlease update the configuration, then re-attempt "
"execution.\n")
sys.exit(0)
# Cycle through known values and match provided object variable value to backend value
for object_variable_value in object_variable["Values"]:
# Create list of all known and accepted frontend and backend values
current_known_frontend_and_backend_value_options = (object_variable_value.values())
# Retrieve the current known backend value
current_known_backend_value = object_variable_value["BackEndValue"]
if (
reformatted_object_variable_value
in
("".join(current_known_frontend_or_backend_value.lower().split())
for
current_known_frontend_or_backend_value
in
current_known_frontend_and_backend_value_options
)
):
backend_object_variable_value = current_known_backend_value
break
else:
# If no backend match is found with the user provided object variable value, pass on the user provided object variable value to Intersight to decide
print(f"\nWARNING: An unknown {self.object_type} value of "
f"'{provided_object_variable_value}' has been "
f"provided for the {object_variable['Description']} "
"settings!")
print("An attempt will be made to configure the unknown "
f"{object_variable['Description']} value.")
print("If there is an error, please use one of the "
"following known values for the "
f"{object_variable['Description']} settings, then "
"re-attempt execution:\n")
print(*all_known_and_accepted_frontend_values,
sep=", "
)
backend_object_variable_value = provided_object_variable_value
# Update Intersight API body with the converted object variable value
self.intersight_api_body[object_variable["AttributeName"]] = backend_object_variable_value
def object_maker(self):
"""This function makes the targeted policy object.
"""
print(f"\nConfiguring the {self.object_type} named "
f"{self.policy_name}...")
# Update the API body with general attributes
self._update_api_body_general_attributes()
# Update the API body with individual subobject attributes
self._update_api_body_subobject_attributes()
# Update the API body with individual mapped object attributes
self._update_api_body_mapped_object_attributes()
# POST the API body to Intersight
self._post_intersight_object()
class DirectlyAttachedUcsServerPolicy(UcsPolicy):
"""This class is used to configure a UCS Server Policy in Intersight that
is logically directly attached to UCS Servers through UCS
Server Profiles.
"""
object_type = "Directly Attached UCS Server Policy"
intersight_api_path = None
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_server_profile_name=""
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client
)
self.ucs_server_profile_name = ucs_server_profile_name
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client}, "
f"'{self.ucs_server_profile_name}')"
)
def _attach_ucs_server_profile(self):
"""This is a function to attach an Intersight UCS Server Profile to an
Intersight Policy.
Returns:
A dictionary for the API body of the policy object to be posted on
Intersight.
"""
# Attach UCS Server Profile
if self.ucs_server_profile_name:
print("Attaching the UCS Server Profile named "
f"{self.ucs_server_profile_name}...")
# Get UCS Server Profile MOID
ucs_server_profile_moid = intersight_object_moid_retriever(intersight_api_key_id=None,
intersight_api_key=None,
object_name=self.ucs_server_profile_name,
intersight_api_path="server/Profiles",
object_type="UCS Server Profile",
organization=self.organization,
preconfigured_api_client=self.api_client
)
# Update the API body with the appropriate Server Profile MOID
self.intersight_api_body["Profiles"] = [
{"Moid": ucs_server_profile_moid,
"ObjectType": "server.Profile"}
]
def object_maker(self):
"""This function makes the targeted policy object.
"""
print(f"\nConfiguring the {self.object_type} named "
f"{self.policy_name}...")
# Update the API body with general attributes
self._update_api_body_general_attributes()
# Update the API body with individual subobject attributes
self._update_api_body_subobject_attributes()
# Update the API body with individual mapped object attributes
self._update_api_body_mapped_object_attributes()
# Update the API body with a UCS Server Profile attached, if specified
self._attach_ucs_server_profile()
# POST the API body to Intersight
self._post_intersight_object()
class IpmiOverLanPolicy(DirectlyAttachedUcsServerPolicy):
"""This class is used to configure a IPMI Over LAN Policy in Intersight.
"""
object_type = "IPMI Over LAN Policy"
intersight_api_path = "ipmioverlan/Policies"
def __init__(self,
intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_server_profile_name="",
enable_ipmi_over_lan=True,
privilege_level="admin",
encryption_key=""
):
super().__init__(intersight_api_key_id,
intersight_api_key,
policy_name,
policy_description,
organization,
intersight_base_url,
tags,
preconfigured_api_client,
ucs_server_profile_name
)
self.enable_ipmi_over_lan = enable_ipmi_over_lan
self.privilege_level = privilege_level
self.encryption_key = encryption_key
self.intersight_api_body = {
"Name": self.policy_name,
"Description": self.policy_description,
"Enabled": self.enable_ipmi_over_lan,
"Privilege": self.privilege_level,
"EncryptionKey": self.encryption_key
}
def __repr__(self):
return (
f"{self.__class__.__name__}"
f"('{self.intersight_api_key_id}', "
f"'{self.intersight_api_key}', "
f"'{self.policy_name}', "
f"'{self.policy_description}', "
f"'{self.organization}', "
f"'{self.intersight_base_url}', "
f"{self.tags}, "
f"{self.api_client}, "
f"'{self.ucs_server_profile_name}', "
f"{self.enable_ipmi_over_lan}, "
f"{self.privilege_level}, "
f"'{self.encryption_key}')"
)
# Establish function to make Policy
def ipmi_over_lan_policy_maker(
intersight_api_key_id,
intersight_api_key,
policy_name,
enable_ipmi_over_lan=True,
privilege_level="admin",
encryption_key="",
policy_description="",
organization="default",
intersight_base_url="https://www.intersight.com/api/v1",
tags=None,
preconfigured_api_client=None,
ucs_server_profile_name=""
):
"""This is a function used to make a IPMI Over LAN Policy on Cisco Intersight.
Args:
intersight_api_key_id (str):
The ID of the Intersight API key.
intersight_api_key (str):
The system file path of the Intersight API key.
policy_name (str):
The name of the policy to be created.
enable_ipmi_over_lan (bool):
Optional; The setting to enable the IPMI Over LAN service. The
default value is True.
privilege_level (str):
Optional; The highest privilege level assignable to an IPMI Over
LAN session. The accepted values are "admin", "user", and
"read-only". The default value is "admin".
encryption_key (str):
Optional; The encryption key for IPMI Over LAN sessions. The
encryption key should have an even number of hexadecimal characters
that does not exceed 40 total characters. The default value is ("").
policy_description (str):
Optional; The description of the policy to be created. The default
value is an empty string ("").
organization (str):
Optional; The Intersight account organization of the policy.
The default value is "default".
intersight_base_url (str):
Optional; The base URL for Intersight API paths. The default value
is "https://www.intersight.com/api/v1". This value typically only
needs to be changed if using the Intersight Virtual Appliance.
tags (dict):
Optional; The Intersight account tags that will be assigned to the
policy. The default value is None.
preconfigured_api_client ("ApiClient"):
Optional; An ApiClient class instance which handles
Intersight client-server communication through the use of API keys.
The default value is None. If a preconfigured_api_client argument
is provided, empty strings ("") or None can be provided for the
intersight_api_key_id, intersight_api_key, and intersight_base_url
arguments.
ucs_server_profile_name (str):
Optional; The UCS Server Profile the policy should be attached to.
The default value is an empty string ("").
"""
def builder(target_object):
"""This is a function used to build the objects that are components of
an overarching pool or policy on Cisco Intersight.
Args:
target_object (class):
The class representing the object to be built on Intersight.
Raises:
Exception:
An exception occurred due to an issue accessing the Intersight
API path. The status code or error message will be specified.
"""
try:
target_object.object_maker()
except Exception:
print("\nA configuration error has occurred!\n")
print("The builder function failed to configure the "
f"{target_object.object_type} settings.")
print("Please check the provided arguments for the "
f"{target_object.object_type} settings.\n")
print("Exception Message: ")
traceback.print_exc()
# Define and create IPMI Over LAN Policy object in Intersight
builder(
IpmiOverLanPolicy(
intersight_api_key_id=intersight_api_key_id,
intersight_api_key=intersight_api_key,
policy_name=policy_name,
policy_description=policy_description,
organization=organization,
intersight_base_url=intersight_base_url,
tags=tags,
preconfigured_api_client=preconfigured_api_client,
ucs_server_profile_name=ucs_server_profile_name,
enable_ipmi_over_lan=enable_ipmi_over_lan,
privilege_level=privilege_level,
encryption_key=encryption_key
))
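# A minimal usage sketch for ipmi_over_lan_policy_maker(). The key ID, key
# file path and policy name below are placeholder assumptions; substitute
# values from a real Intersight account before use:
#
#     ipmi_over_lan_policy_maker(
#         intersight_api_key_id="<your_api_key_id>",
#         intersight_api_key="<path_to_your_secret_key_file>",
#         policy_name="IPMI-Over-LAN-Policy-Example",
#         enable_ipmi_over_lan=True,
#         privilege_level="admin",
#         organization="default"
#         )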
def main():
# Establish Maker specific variables
maker_type = "Intersight IPMI Over LAN Policy Maker"
# Establish Intersight SDK for Python API client instance
main_intersight_api_client = get_api_client(api_key_id=key_id,
api_secret_file=key,
endpoint=intersight_base_url
)
# Starting the Policy Maker for Cisco Intersight
print(f"\nStarting the {maker_type} for Cisco Intersight.\n")
# Run the Intersight API and Account Availability Test
print("Running the Intersight API and Account Availability Test.")
test_intersight_api_service(
intersight_api_key_id=None,
intersight_api_key=None,
preconfigured_api_client=main_intersight_api_client
)
# Create the Policy in Intersight
ipmi_over_lan_policy_maker(
intersight_api_key_id=None,
intersight_api_key=None,
policy_name=ipmi_over_lan_policy_name,
enable_ipmi_over_lan=enable_ipmi_over_lan,
privilege_level=privilege_level,
encryption_key=encryption_key,
policy_description=ipmi_over_lan_policy_description,
organization=ipmi_over_lan_policy_organization,
intersight_base_url=intersight_base_url,
tags=ipmi_over_lan_policy_tags,
preconfigured_api_client=main_intersight_api_client,
ucs_server_profile_name=ucs_server_profile_name
)
# Policy Maker completion
print(f"\nThe {maker_type} has completed.\n")
if __name__ == "__main__":
main()
# Exiting the Policy Maker for Cisco Intersight
sys.exit(0)
| 50.774834 | 177 | 0.564253 |
794410c504054c2f3d8ce66dc822043d43725bb7 | 102 | py | Python | automatic_card/apps.py | One-Xiao-Yi/class_schedule_card | b9aa917576f9e27a882d54c036dbfc0fbd8538fa | [
"Apache-2.0"
] | 1 | 2019-10-24T10:23:44.000Z | 2019-10-24T10:23:44.000Z | automatic_card/apps.py | One-Xiao-Yi/class_schedule_card | b9aa917576f9e27a882d54c036dbfc0fbd8538fa | [
"Apache-2.0"
] | 6 | 2020-06-05T23:36:36.000Z | 2022-02-10T09:28:33.000Z | automatic_card/apps.py | One-Xiao-Yi/class_schedule_card | b9aa917576f9e27a882d54c036dbfc0fbd8538fa | [
"Apache-2.0"
] | 2 | 2019-11-20T09:05:23.000Z | 2020-02-17T16:41:38.000Z | from django.apps import AppConfig
class AutomaticCardConfig(AppConfig):
name = 'automatic_card'
| 17 | 37 | 0.784314 |
794410cf2a31b366f08fa801a58b51478394d4fc | 11,201 | py | Python | docs/conf.py | spavuluri/watchmaker | a448e6a7ead1af7bcff23f0099540ce395b7eb4f | [
"Apache-2.0"
] | null | null | null | docs/conf.py | spavuluri/watchmaker | a448e6a7ead1af7bcff23f0099540ce395b7eb4f | [
"Apache-2.0"
] | 8 | 2018-02-02T17:14:28.000Z | 2018-05-22T00:11:37.000Z | docs/conf.py | YakDriver/watchmaker | e7df44637c82b092bd413c54abc8eb472b7eb876 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Configure Watchmaker documentation."""
from __future__ import (absolute_import, division, print_function,
unicode_literals, with_statement)
import os
import sys
from recommonmark.transform import AutoStructify
#
# Created by sphinx-quickstart on Thu Jun 30 20:11:36 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath('../src/'))
rtd = os.environ.get('READTHEDOCS', None) == 'True'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = []
extensions = [
'recommonmark',
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
}
autoclass_content = 'class'
autodoc_member_order = 'bysource'
autodoc_default_options = {'members': True, 'show-inheritance': True}
napoleon_use_param = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a map of suffix => parser:
#
# source_suffix = {
# '.md': 'markdown',
# '.rst': 'restructuredtext',
# }
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Watchmaker'
copyright = u'2016, Plus3 IT Systems' # noqa: A001
author = u'Plus3 IT Systems'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = u'0.1'
# The full version, including alpha/beta/rc tags.
# release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme
# Add any paths that contain custom themes here, relative to this directory.
#
# html_theme_path
if not rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'MothBall v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Watchmaker'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Watchmaker.tex', u'Watchmaker Documentation',
u'Plus3 IT Systems', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'watchmaker', u'Watchmaker Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Watchmaker', u'Watchmaker Documentation',
author, 'Watchmaker', 'Applied Configuration Management.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# Prefix document path to section labels, otherwise autogenerated labels would
# look like 'heading' rather than 'path/to/file:heading'
autosectionlabel_prefix_document = True
linkcheck_ignore = [
r'https://github.com/plus3it/watchmaker/compare/(\d+\.){3}\.(\.\d+){3}',
r'https://github.com/plus3it/watchmaker/compare/(\d+\.){3}\.(\.x){3}',
r'https://watchmaker.cloudarmor.io/releases/(.*)$',
r'https://docs\.saltstack\.com/en/latest/ref/modules/all/[a-z\.]*#[a-z\.]*'
]
def setup(app): # noqa: D103
app.add_stylesheet("theme_overrides.css")
app.add_config_value(
'recommonmark_config',
{
'enable_eval_rst': True,
},
True,
)
app.add_transform(AutoStructify)
| 28.57398 | 79 | 0.698688 |
7944115783846a4d235ffe88f298017e03d3d314 | 3,695 | py | Python | test/functional/zerocoin_valid_public_spend.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | test/functional/zerocoin_valid_public_spend.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | test/functional/zerocoin_valid_public_spend.py | AtomicLemon/bitcoinflex | fe02bd48be01e08a047ef8d5821eb247a0681306 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2019 The PIVX developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the 'valid publicCoinSpend spend' scenario
'''
import random
from time import sleep
from test_framework.authproxy import JSONRPCException
from test_framework.util import assert_equal, assert_greater_than
from fake_stake.base_test import BitcoinFlex_FakeStakeTest
class zBCXValidCoinSpendTest(BitcoinFlex_FakeStakeTest):
def run_test(self):
self.description = "Covers the 'valid publicCoinSpend spend' scenario."
self.init_test()
INITAL_MINED_BLOCKS = 301 # Blocks mined before minting
MORE_MINED_BLOCKS = 52 # Blocks mined after minting (before spending)
DENOM_TO_USE = 1 # zc denomination used for double spending attack
# 1) Start mining blocks
self.log.info("Mining %d first blocks..." % INITAL_MINED_BLOCKS)
self.node.generate(INITAL_MINED_BLOCKS)
sleep(2)
# 2) Mint zerocoins
self.log.info("Minting %d-denom zBCXs..." % DENOM_TO_USE)
self.node.mintzerocoin(DENOM_TO_USE)
self.node.generate(1)
sleep(2)
self.node.mintzerocoin(DENOM_TO_USE)
sleep(2)
# 3) Mine more blocks and collect the mint
self.log.info("Mining %d more blocks..." % MORE_MINED_BLOCKS)
self.node.generate(MORE_MINED_BLOCKS)
sleep(2)
list = self.node.listmintedzerocoins(True, True)
mint = list[0]
# 4) Get the raw zerocoin data
exported_zerocoins = self.node.exportzerocoins(False)
zc = [x for x in exported_zerocoins if mint["serial hash"] == x["id"]]
if len(zc) == 0:
raise AssertionError("mint not found")
# 5) Spend the minted coin (mine six more blocks)
self.log.info("Spending the minted coin with serial %s and mining six more blocks..." % zc[0]["s"])
txid = self.node.spendzerocoinmints([mint["serial hash"]])['txid']
self.log.info("Spent on tx %s" % txid)
self.node.generate(6)
sleep(2)
rawTx = self.node.getrawtransaction(txid, 1)
if rawTx is None:
self.log.warning("rawTx is: %s" % rawTx)
raise AssertionError("TEST FAILED")
else:
assert (rawTx["confirmations"] == 6)
self.log.info("%s VALID PUBLIC COIN SPEND PASSED" % self.__class__.__name__)
self.log.info("%s Trying to spend the serial twice now" % self.__class__.__name__)
serial = zc[0]["s"]
randomness = zc[0]["r"]
privkey = zc[0]["k"]
tx = None
try:
tx = self.node.spendrawzerocoin(serial, randomness, DENOM_TO_USE, privkey)
except JSONRPCException as e:
self.log.info("GOOD: Transaction did not verify")
if tx is not None:
self.log.warning("Tx is: %s" % tx)
raise AssertionError("TEST FAILED")
self.log.info("%s DOUBLE SPENT SERIAL NOT VERIFIED, TEST PASSED" % self.__class__.__name__)
self.log.info("%s Trying to spend using the old coin spend method.." % self.__class__.__name__)
tx = None
try:
tx = self.node.spendzerocoin(DENOM_TO_USE, False, False, "", False)
raise AssertionError("TEST FAILED, old coinSpend spent")
except JSONRPCException as e:
self.log.info("GOOD: spendzerocoin old spend did not verify")
self.log.info("%s OLD COIN SPEND NON USABLE ANYMORE, TEST PASSED" % self.__class__.__name__)
if __name__ == '__main__':
zBCXValidCoinSpendTest().main()
| 35.873786 | 107 | 0.64249 |
794411bb543d6d82c2c2d89a0688cdc2010457a1 | 13,284 | py | Python | reviewboard/webapi/base.py | znick/reviewboard | f32320b267efcdf2feff1661eabe57f99ef490a7 | [
"MIT"
] | null | null | null | reviewboard/webapi/base.py | znick/reviewboard | f32320b267efcdf2feff1661eabe57f99ef490a7 | [
"MIT"
] | null | null | null | reviewboard/webapi/base.py | znick/reviewboard | f32320b267efcdf2feff1661eabe57f99ef490a7 | [
"MIT"
] | 1 | 2021-11-23T15:25:44.000Z | 2021-11-23T15:25:44.000Z | from __future__ import unicode_literals
from django.contrib import auth
from django.db.models import Q
from django.utils import six
from django.utils.encoding import force_unicode
from djblets.util.decorators import augment_method_from
from djblets.webapi.decorators import (webapi_login_required,
webapi_request_fields)
from djblets.webapi.errors import NOT_LOGGED_IN, PERMISSION_DENIED
from djblets.webapi.resources import WebAPIResource as DjbletsWebAPIResource
from reviewboard.site.models import LocalSite
from reviewboard.site.urlresolvers import local_site_reverse
from reviewboard.webapi.decorators import (webapi_check_local_site,
webapi_check_login_required)
from reviewboard.webapi.models import WebAPIToken
CUSTOM_MIMETYPE_BASE = 'application/vnd.reviewboard.org'
EXTRA_DATA_LEN = len('extra_data.')
class WebAPIResource(DjbletsWebAPIResource):
"""A specialization of the Djblets WebAPIResource for Review Board."""
mimetype_vendor = 'reviewboard.org'
api_token_access_allowed = True
@property
def policy_id(self):
"""Returns the ID used for access policies.
This defaults to the name of the resource, but can be overridden
in case the name is not specific enough or there's a conflict.
"""
return self.name
def call_method_view(self, request, method, view, *args, **kwargs):
# This will associate the token, if any, with the request.
webapi_token = self._get_api_token_for_request(request)
if webapi_token:
if not self.api_token_access_allowed:
return PERMISSION_DENIED
policy = webapi_token.policy
resources_policy = policy.get('resources')
if resources_policy:
resource_id = kwargs.get(self.uri_object_key)
if not self.is_resource_method_allowed(resources_policy,
method, resource_id):
# The token's policies disallow access to this resource.
return PERMISSION_DENIED
return view(request, *args, **kwargs)
def has_access_permissions(self, *args, **kwargs):
# By default, raise an exception if this is called. Specific resources
# will have to explicitly override this and opt-in to access.
raise NotImplementedError(
'%s must provide a has_access_permissions method'
% self.__class__.__name__)
@webapi_check_login_required
@webapi_check_local_site
@augment_method_from(DjbletsWebAPIResource)
def get(self, *args, **kwargs):
"""Returns the serialized object for the resource.
This will require login if anonymous access isn't enabled on the
site.
"""
pass
@webapi_check_login_required
@webapi_check_local_site
@webapi_request_fields(
optional=dict({
'counts-only': {
'type': bool,
'description': 'If specified, a single ``count`` field is '
'returned with the number of results, instead '
'of the results themselves.',
},
}, **DjbletsWebAPIResource.get_list.optional_fields),
required=DjbletsWebAPIResource.get_list.required_fields,
allow_unknown=True
)
def get_list(self, request, *args, **kwargs):
"""Returns a list of objects.
This will require login if anonymous access isn't enabled on the
site.
If ``?counts-only=1`` is passed on the URL, then this will return
only a ``count`` field with the number of entries, instead of the
serialized objects.
"""
if self.model and request.GET.get('counts-only', False):
return 200, {
'count': self.get_queryset(request, is_list=True,
*args, **kwargs).count()
}
else:
return self._get_list_impl(request, *args, **kwargs)
@webapi_login_required
@webapi_check_local_site
@augment_method_from(DjbletsWebAPIResource)
def delete(self, *args, **kwargs):
pass
def _get_list_impl(self, request, *args, **kwargs):
"""Actual implementation to return the list of results.
This by default calls the parent WebAPIResource.get_list, but this
can be overridden by subclasses to provide a more custom
implementation while still retaining the ?counts-only=1 functionality.
"""
return super(WebAPIResource, self).get_list(request, *args, **kwargs)
def get_href(self, obj, request, *args, **kwargs):
"""Returns the URL for this object.
This is an override of get_href, which takes into account our
local_site_name namespacing in order to get the right prefix on URLs.
"""
if not self.uri_object_key:
return None
href_kwargs = {
self.uri_object_key: getattr(obj, self.model_object_key),
}
href_kwargs.update(self.get_href_parent_ids(obj, **kwargs))
return request.build_absolute_uri(
self.get_item_url(request=request, **href_kwargs))
def get_list_url(self, **kwargs):
"""Returns the URL to the list version of this resource.
This will generate a URL for the resource, given the provided
arguments for the URL pattern.
"""
return self._get_resource_url(self.name_plural, **kwargs)
def get_item_url(self, **kwargs):
"""Returns the URL to the item version of this resource.
This will generate a URL for the resource, given the provided
arguments for the URL pattern.
"""
return self._get_resource_url(self.name, **kwargs)
def build_queries_for_int_field(self, request, field_name,
query_param_name=None):
"""Builds queries based on request parameters for an int field.
get_queryset() implementations can use this to allow callers to
filter results through range matches. Callers can search for exact
matches, or can do <, <=, >, or >= matches.
"""
if not query_param_name:
query_param_name = field_name.replace('_', '-')
q = Q()
if query_param_name in request.GET:
q = q & Q(**{field_name: request.GET[query_param_name]})
for op in ('gt', 'gte', 'lt', 'lte'):
param = '%s-%s' % (query_param_name, op)
if param in request.GET:
query_field = '%s__%s' % (field_name, op)
q = q & Q(**{query_field: request.GET[param]})
return q
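    # Illustrative mapping for build_queries_for_int_field() (assumed example
    # values): with field_name='file_count', a request such as
    # ?file-count-gte=5&file-count-lt=20 yields
    # Q(file_count__gte='5') & Q(file_count__lt='20'), which a get_queryset()
    # implementation can apply via queryset.filter(q).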
def is_resource_method_allowed(self, resources_policy, method,
resource_id):
"""Returns whether a method can be performed on a resource.
A method can be performed if a specific per-resource policy allows
it, and the global policy also allows it.
The per-resource policy takes precedence over the global policy.
        If, for instance, the global policy blocks a method but the resource
        policy allows it, the method will be allowed.
If no policies apply to this, then the default is to allow.
"""
# First check the resource policy. For this, we'll want to look in
# both the resource ID and the '*' wildcard.
resource_policy = resources_policy.get(self.policy_id)
if resource_policy:
permission = self._check_resource_policy(
resource_policy, method, [resource_id, '*'])
if permission is not None:
return permission
# Nothing was found there. Now check in the global policy. Note that
# there isn't a sub-key of 'resources.*', so we'll check based on
# resources_policy.
if '*' in resources_policy:
permission = self._check_resource_policy(
resources_policy, method, ['*'])
if permission is not None:
return permission
return True
def _check_resource_policy(self, policy, method, keys):
"""Checks the policy for a specific resource and method.
This will grab the resource policy for the given policy ID,
and see if a given method can be performed on that resource,
without factoring in any global policy rules.
If the method is allowed and restrict_ids is True, this will then
check if the resource should be blocked based on the ID.
In case of a conflict, blocked policies always trump allowed
policies.
"""
for key in keys:
sub_policy = policy.get(key)
if sub_policy:
# We first want to check the specific values, to see if they've
# been singled out. If not found, we'll check the wildcards.
#
# Blocked values always take precedence over allowed values.
allowed = sub_policy.get('allow', [])
blocked = sub_policy.get('block', [])
if method in blocked:
return False
elif method in allowed:
return True
elif '*' in blocked:
return False
elif '*' in allowed:
return True
return None
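    # Illustrative token policy fragment (assumed shape, purely for reading
    # the two methods above):
    #
    #     resources_policy = {
    #         'repository': {
    #             '42': {'allow': ['GET'], 'block': ['*']},
    #             '*': {'allow': ['*']},
    #         },
    #     }
    #
    # For a resource whose policy_id is 'repository', a GET on item 42 is
    # allowed by its per-ID entry, any other method on item 42 is blocked,
    # and requests for other items fall through to the '*' wildcard, which
    # allows them.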
def _get_api_token_for_request(self, request):
webapi_token = getattr(request, '_webapi_token', None)
if not webapi_token:
webapi_token_id = request.session.get('webapi_token_id')
if webapi_token_id:
try:
webapi_token = WebAPIToken.objects.get(pk=webapi_token_id,
user=request.user)
except WebAPIToken.DoesNotExist:
# This token is no longer valid. Log the user out.
auth.logout(request)
request._webapi_token = webapi_token
return webapi_token
def _get_queryset(self, request, is_list=False, *args, **kwargs):
"""Returns the queryset for the resource.
This is a specialization of the Djblets WebAPIResource._get_queryset(),
which imposes further restrictions on the queryset results if using
a WebAPIToken for authentication that defines a policy.
Any items in the queryset that are denied by the policy will be
excluded from the results.
"""
queryset = super(WebAPIResource, self)._get_queryset(
request, is_list=is_list, *args, **kwargs)
if is_list:
# We'll need to filter the list of results down to exclude any
# that are blocked for GET access by the token policy.
webapi_token = self._get_api_token_for_request(request)
if webapi_token:
resources_policy = webapi_token.policy.get('resources', {})
resource_policy = resources_policy.get(self.policy_id)
if resource_policy:
resource_ids = [
resource_id
for resource_id in six.iterkeys(resource_policy)
                        if (resource_id != '*' and
                            self._check_resource_policy(
                                resource_policy, 'GET',
                                [resource_id]) is False)
]
if resource_ids:
queryset = queryset.exclude(**{
self.model_object_key + '__in': resource_ids,
})
return queryset
def _get_resource_url(self, name, local_site_name=None, request=None,
**kwargs):
return local_site_reverse(
self._build_named_url(name),
local_site_name=local_site_name,
request=request,
kwargs=kwargs)
def _get_local_site(self, local_site_name):
if local_site_name:
return LocalSite.objects.get(name=local_site_name)
else:
return None
def _get_form_errors(self, form):
fields = {}
for field in form.errors:
fields[field] = [force_unicode(e) for e in form.errors[field]]
return fields
def _no_access_error(self, user):
"""Returns a WebAPIError indicating the user has no access.
Which error this returns depends on whether or not the user is logged
        in. If logged in, this will return PERMISSION_DENIED.
Otherwise, it will return NOT_LOGGED_IN.
"""
if user.is_authenticated():
return PERMISSION_DENIED
else:
return NOT_LOGGED_IN
def _import_extra_data(self, extra_data, fields):
for key, value in six.iteritems(fields):
if key.startswith('extra_data.'):
key = key[EXTRA_DATA_LEN:]
if value != '':
extra_data[key] = value
elif key in extra_data:
del extra_data[key]
| 37.419718 | 79 | 0.606143 |
794411f77bab229866674303c054955dd00b1c5e | 262 | py | Python | plugins/NetQuery/FalseQuery/__init__.py | madfordmac/pircons | 1eea5e46e7ea89984d69da2817a86e14f8155202 | [
"MIT"
] | null | null | null | plugins/NetQuery/FalseQuery/__init__.py | madfordmac/pircons | 1eea5e46e7ea89984d69da2817a86e14f8155202 | [
"MIT"
] | null | null | null | plugins/NetQuery/FalseQuery/__init__.py | madfordmac/pircons | 1eea5e46e7ea89984d69da2817a86e14f8155202 | [
"MIT"
] | null | null | null | from .. import NetQuery
class FalseQuery(NetQuery):
"""Always unsuccessful query."""
def __init__(self, cfg):
super(FalseQuery, self).__init__(cfg)
def query(self):
"""Always return offline. Intended for debugging.
:return: False
"""
return False
| 20.153846 | 51 | 0.706107 |
7944138a496e0c1db5bb1a605e15b6989a763296 | 9,414 | py | Python | p3/management/commands/attendify_schedule_xlsx.py | malemburg/epcon | 1edec493ac1258950dcabdc9f9ee8b97c24f96c5 | [
"BSD-2-Clause"
] | null | null | null | p3/management/commands/attendify_schedule_xlsx.py | malemburg/epcon | 1edec493ac1258950dcabdc9f9ee8b97c24f96c5 | [
"BSD-2-Clause"
] | null | null | null | p3/management/commands/attendify_schedule_xlsx.py | malemburg/epcon | 1edec493ac1258950dcabdc9f9ee8b97c24f96c5 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
""" Update an Attendify schedule XLSX file with the currently accepted
talks.
Usage: manage.py attendify_schedule_xlsx ep2016 schedule.xlsx
Note that for Attendify you have to download the schedule before
running this script, since they add meta data to the downloaded
file which has to be kept around when uploading it again.
The script updates schedule.xlsx in place. Unfortunately, Attendify
currently has a bug in that it doesn't accept the file format
generated by openpyxl. Opening the file in LibreOffice and saving
it (without changes) fixes this as work-around.
Attendify Worksheet "Schedule" format
-------------------------------------
Row A4: Session Title, Date (MM/DD/YYYY), Start Time (HH:MM), End
Time (HH:MM), Description (Optional), Location (Optional), Track
Title (Optional), UID (do not delete)
Row A6: Start of data
"""
from django.core.management.base import BaseCommand, CommandError
from django.core import urlresolvers
from django.utils.html import strip_tags
from conference import models
from conference import utils
import datetime
from collections import defaultdict
from optparse import make_option
import operator
import markdown2
import openpyxl
### Globals
# Debug output ?
_debug = 0
# These must match the talk .type or .admin_type
from accepted_talks import TYPE_NAMES
# Special handling of poster sessions
if 0:
# Poster sessions don't have events associated with them, so use
# these defaults
ADJUST_POSTER_SESSIONS = True
POSTER_START = datetime.datetime(2016,7,19,15,15) # TBD
POSTER_DURATION = datetime.timedelta(minutes=90)
POSTER_ROOM = u'Exhibition Hall'
else:
ADJUST_POSTER_SESSIONS = False
### Helpers
def profile_url(user):
return urlresolvers.reverse('conference-profile',
args=[user.attendeeprofile.slug])
def speaker_listing(talk):
return u', '.join(
u'<i>%s %s</i>' % (
speaker.user.first_name,
speaker.user.last_name)
for speaker in talk.get_all_speakers())
def format_text(text, remove_tags=False, output_html=True):
# Remove whitespace
text = text.strip()
if not text:
return text
# Remove links, tags, etc.
if remove_tags:
text = strip_tags(text)
# Remove quotes
if text[0] == '"' and text[-1] == '"':
text = text[1:-1]
# Convert markdown markup to HTML
if output_html:
text = markdown2.markdown(text)
return text
def talk_title(talk):
    return format_text(talk.title, remove_tags=True, output_html=False)
def talk_abstract(talk):
return '<p>By %s</p>\n\n%s' % (
speaker_listing(talk),
format_text(talk.getAbstract().body))
def event_title(event):
    return format_text(event.custom, remove_tags=True, output_html=False)
def event_abstract(event):
return format_text(event.abstract)
def add_event(data, talk=None, event=None, session_type='', talk_events=None):
# Determine title and abstract
title = ''
abstract = ''
if talk is None:
if event is None:
raise TypeError('need either talk or event given')
title = event_title(event)
abstract = event_abstract(event)
else:
title = talk_title(talk)
abstract = talk_abstract(talk)
if event is None:
event = talk.get_event()
# Determine time_range and room
if event is None:
if talk.type and talk.type[:1] == 'p' and ADJUST_POSTER_SESSIONS:
# Poster session
time_range = (POSTER_START,
POSTER_START + POSTER_DURATION)
room = POSTER_ROOM
else:
print ('Talk %r (type %r) does not have an event '
'associated with it; skipping' %
(title, talk.type))
return
else:
time_range = event.get_time_range()
tracks = event.tracks.all()
if tracks:
room = tracks[0].title
else:
room = u''
if talk_events is not None:
talk_events[event.pk] = event
# Don't add entries for events without title
if not title:
return
# Format time entries
date = time_range[0].strftime('%m/%d/%Y')
start_time = time_range[0].strftime('%H:%M')
stop_time = time_range[1].strftime('%H:%M')
# UID
uid = u''
data.append((
title,
date,
start_time,
stop_time,
abstract,
room,
session_type,
uid,
))
# Start row of data in spreadsheet (Python 0-based index)
SCHEDULE_WS_START_DATA = 5
# Column number of UID columns (Python 0-based index)
SCHEDULE_UID_COLUMN = 7
# Number of columns to make row unique (title, date, start, end)
SCHEDULE_UNIQUE_COLS = 4
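# Sketch of how these constants are used by update_schedule() below: the first
# SCHEDULE_UNIQUE_COLS fields identify a row, so a previously uploaded row
# keeps its Attendify UID (column SCHEDULE_UID_COLUMN) across runs.
# Hypothetical example:
#     uids[("Writing Greenlets", "07/19/2016", "14:00", "14:45")] = "<existing UID>"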
def update_schedule(schedule_xlsx, new_data, updated_xlsx=None):
# Load workbook
wb = openpyxl.load_workbook(schedule_xlsx)
assert wb.sheetnames == [u'Instructions', u'Schedule', u'System']
ws = wb['Schedule']
# Extract data values
ws_data = list(ws.values)[SCHEDULE_WS_START_DATA:]
print ('read %i data lines' % len(ws_data))
print ('first line: %r' % ws_data[:1])
print ('last line: %r' % ws_data[-1:])
# Reconcile UIDs / talks
uids = {}
for line in ws_data:
uid = line[SCHEDULE_UID_COLUMN]
if not uid:
continue
uids[tuple(line[:SCHEDULE_UNIQUE_COLS])] = uid
# Add UID to new data
new_schedule = []
for line in new_data:
key = tuple(line[:SCHEDULE_UNIQUE_COLS])
if key not in uids:
print ('New or rescheduled talk %s found' % (key,))
uid = u''
else:
uid = uids[key]
line = tuple(line[:SCHEDULE_UID_COLUMN]) + (uid,)
new_schedule.append(line)
new_data = new_schedule
# Replace old data with new data
old_data_rows = len(ws_data)
new_data_rows = len(new_data)
print ('new data: %i data lines' % new_data_rows)
offset = SCHEDULE_WS_START_DATA + 1
print ('new_data = %i rows' % len(new_data))
for j, row in enumerate(ws[offset: offset + new_data_rows - 1]):
new_row = new_data[j]
if _debug:
print ('updating row %i with %r' % (j, new_row))
if len(row) > len(new_row):
row = row[:len(new_row)]
for i, cell in enumerate(row):
cell.value = new_row[i]
# Overwrite unused cells with None
if new_data_rows < old_data_rows:
for j, row in enumerate(ws[offset + new_data_rows + 1:
offset + old_data_rows + 1]):
if _debug:
print ('clearing row %i' % (j,))
for i, cell in enumerate(row):
cell.value = None
# Write updated data
if updated_xlsx is None:
updated_xlsx = schedule_xlsx
wb.save(updated_xlsx)
###
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
# make_option('--option',
# action='store',
# dest='option_attr',
# default=0,
# type='int',
# help='Help text',
# ),
)
args = '<conference> <xlsx-file>'
def handle(self, *args, **options):
try:
conference = args[0]
except IndexError:
raise CommandError('conference not specified')
try:
schedule_xlsx = args[1]
except IndexError:
raise CommandError('XLSX file not specified')
talks = (models.Talk.objects
.filter(conference=conference,
status='accepted'))
# Group by types
talk_types = {}
for talk in talks:
talk_type = talk.type[:1]
admin_type = talk.admin_type[:1]
if admin_type == 'm':
type = 'm'
elif admin_type == 'k':
type = 'k'
else:
type = talk_type
if type in talk_types:
talk_types[type].append(talk)
else:
talk_types[type] = [talk]
# Build data for updating the spreadsheet
data = []
talk_events = {}
for type, type_name, description in TYPE_NAMES:
# Get bag with talks
bag = talk_types.get(type, [])
if not bag:
continue
# Sort by talk title using title case
bag.sort(key=lambda talk: talk_title(talk).title())
# Add talks from bag to data
for talk in bag:
add_event(data,
talk=talk,
talk_events=talk_events,
session_type=type_name)
# Add events which are not talks
for schedule in models.Schedule.objects.filter(conference=conference):
for event in models.Event.objects.filter(schedule=schedule):
if event.pk in talk_events:
continue
add_event(data, event=event)
# Update spreadsheet with new data
update_schedule(schedule_xlsx, data)
| 28.966154 | 78 | 0.589654 |
794414b11d3ef77ab5505d2f8a324cdf3b5aebfb | 2,238 | py | Python | examples/ad_manager/v201908/creative_set_service/create_creative_set.py | ale180192/googleads-python-lib | 783a2d40a49956fb16ed73280708f6f9e322aa09 | [
"Apache-2.0"
] | 1 | 2019-09-30T06:36:07.000Z | 2019-09-30T06:36:07.000Z | examples/ad_manager/v201908/creative_set_service/create_creative_set.py | ale180192/googleads-python-lib | 783a2d40a49956fb16ed73280708f6f9e322aa09 | [
"Apache-2.0"
] | null | null | null | examples/ad_manager/v201908/creative_set_service/create_creative_set.py | ale180192/googleads-python-lib | 783a2d40a49956fb16ed73280708f6f9e322aa09 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new creative sets.
To determine which creative sets exist, run get_all_creative_sets.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import uuid
# Import appropriate modules from the client library.
from googleads import ad_manager
MASTER_CREATIVE_ID = 'INSERT_MASTER_CREATIVE_ID_HERE'
COMPANION_CREATIVE_ID = 'INSERT_COMPANION_CREATIVE_ID_HERE'
def main(client, master_creative_id, companion_creative_id):
# Initialize appropriate service.
creative_set_service = client.GetService('CreativeSetService',
version='v201908')
# Create creative set objects.
creative_set = {'name': 'Creative set #%s' % uuid.uuid4(),
'masterCreativeId': master_creative_id,
'companionCreativeIds': [companion_creative_id]}
# Add creative sets.
creative_set = creative_set_service.createCreativeSet(creative_set)
# Display results.
if creative_set:
print(('Creative set with ID "%s", master creative ID "%s", and '
'companion creative IDs {%s} was created.')
% (creative_set['id'], creative_set['masterCreativeId'],
','.join(creative_set['companionCreativeIds'])))
if __name__ == '__main__':
# Initialize client object.
ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage()
main(ad_manager_client, MASTER_CREATIVE_ID, COMPANION_CREATIVE_ID)
| 36.096774 | 77 | 0.735925 |
79441544ca91d5ea849c1c3246bee4a92e42276d | 579 | py | Python | mongodb_subjects_storage/test_storage_atomic_ops.py | airspot-dev/krules-subjects-storage-mongodb | 4b9814a39abb7bb2e14e1006f59e8bcfcb4dd0e6 | [
"Apache-2.0"
] | 10 | 2021-02-26T13:00:22.000Z | 2022-03-31T11:38:28.000Z | mongodb_subjects_storage/test_storage_atomic_ops.py | airspot-dev/krules-subjects-storage-mongodb | 4b9814a39abb7bb2e14e1006f59e8bcfcb4dd0e6 | [
"Apache-2.0"
] | 1 | 2021-08-03T10:21:12.000Z | 2021-08-03T10:21:12.000Z | subjects_storages/mongodb/mongodb_subjects_storage/test_storage_atomic_ops.py | airspot-dev/krules | 0e402feef51c6189a163a62912480cfac0c438bb | [
"Apache-2.0"
] | null | null | null |
import os
from dependency_injector import providers
from krules_core.providers import subject_storage_factory
from .storage_impl import SubjectsMongoStorage
mongodb_url = os.environ.get("TEST_MONGODB_SUBJECTS_STORAGE_URL", "mongodb://localhost:27017/admin")
database = os.environ.get("TEST_MONGODB_SUBJECTS_STORAGE_DATABASE", "test")
subject_storage_factory.override(
providers.Factory(
lambda x: SubjectsMongoStorage(x, database, "test-subjects-atomic-ops", client_args=(mongodb_url,),
use_atomic_ops_collection=True))
)
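# After the override above, test code that asks the factory for a subject's
# storage gets the Mongo-backed implementation configured here. Hypothetical
# call shape (one positional argument, matching the lambda's signature):
#     storage = subject_storage_factory("some-subject-name")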
| 32.166667 | 107 | 0.759931 |
794415d53fac8584fa23f47ebffdd7c2e8bd7e3f | 1,202 | py | Python | var/spack/repos/builtin/packages/leptonica/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11 | 2015-10-04T02:17:46.000Z | 2018-02-07T18:23:00.000Z | var/spack/repos/builtin/packages/leptonica/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22 | 2017-08-01T22:45:10.000Z | 2022-03-10T07:46:31.000Z | var/spack/repos/builtin/packages/leptonica/package.py | player1537-forks/spack | 822b7632222ec5a91dc7b7cda5fc0e08715bd47c | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4 | 2016-06-10T17:57:39.000Z | 2018-09-11T04:59:38.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Leptonica(CMakePackage):
"""Leptonica is an open source library containing software that is
broadly useful for image processing and image analysis applications."""
homepage = "http://www.leptonica.org/"
url = "https://github.com/DanBloomberg/leptonica/archive/1.80.0.tar.gz"
version('1.81.0', sha256='70ebc04ff8b9684205bd1d01843c635a8521255b74813bf7cce9a33368f7952c')
version('1.80.0', sha256='3952b974ec057d24267aae48c54bca68ead8275604bf084a73a4b953ff79196e')
version('1.79.0', sha256='bf9716f91a4844c2682a07ef21eaf68b6f1077af1f63f27c438394fd66218e17')
version('1.78.0', sha256='f8ac4d93cc76b524c2c81d27850bfc342e68b91368aa7a1f7d69e34ce13adbb4')
depends_on('giflib')
depends_on('jpeg')
depends_on('libpng')
depends_on('libtiff')
depends_on('zlib')
depends_on('libwebp+libwebpmux+libwebpdemux')
depends_on('openjpeg')
def cmake_args(self):
args = ['-DBUILD_SHARED_LIBS=ON']
return args
| 36.424242 | 96 | 0.747088 |
794416df479daef93c1bab0a7691d36a756f96da | 2,809 | py | Python | test/testBigrandom.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T19:22:17.000Z | 2021-05-26T19:22:17.000Z | test/testBigrandom.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | null | null | null | test/testBigrandom.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | null | null | null | import unittest
import random
import nzmath.bigrandom as bigrandom
class BigrandomTest(unittest.TestCase):
def testUniform(self):
trial_times = 100000
error_range = 0.03
dist = {}
for i in range(5, 100, 10):
dist[i] = 0
for i in range(trial_times):
rnd = bigrandom.randrange(5, 100, 10)
dist[rnd] += 1
for i in range(5, 100, 10):
self.assertTrue(abs(0.1 - dist[i]/float(trial_times)) < error_range)
dist = {}
for i in range(trial_times):
rnd = bigrandom.randrange(-1, 255)
dist[rnd] = dist.get(rnd, 0) + 1
distkeys = list(dist.keys())
distkeys.sort()
self.assertEqual(distkeys, list(range(-1, 255)))
def testRange(self):
for i in range(10000):
start = random.randrange(-5000, 1)**3
stop = random.randrange(1, 5000)**3
step = random.randrange(1, 200)
d = bigrandom.randrange(start, stop, step)
self.assertEqual(0, (d - start) % step)
self.assertTrue(start <= d < stop)
d = bigrandom.randrange(start**2, -stop**2, -step)
self.assertEqual(0, (d - start**2) % step)
self.assertTrue(start**2 >= d > -stop**2)
def testHugeRange(self):
self.assertTrue(2 <= bigrandom.randrange(2, 10**500) < 10**500)
def testValueError(self):
self.assertRaises(ValueError, bigrandom.randrange, 1, 50, 0)
self.assertRaises(ValueError, bigrandom.randrange, 0.5)
self.assertRaises(ValueError, bigrandom.randrange, 3, 4.5)
self.assertRaises(ValueError, bigrandom.randrange, 3, 20, 1.5)
self.assertRaises(ValueError, bigrandom.randrange, 3, 2)
self.assertRaises(ValueError, bigrandom.randrange, 3, 3)
class ChoiceTest (unittest.TestCase):
"""
tests for bigrandom.map_choice
"""
def testidentity(self):
i = lambda x: x
self.assertTrue(0 <= bigrandom.map_choice(i, 2**100) < 2**100)
def testeven(self):
double = lambda x: x + x
self.assertTrue(0 <= bigrandom.map_choice(double, 2**100) < 2**101)
self.assertEqual(0, bigrandom.map_choice(double, 2**100) % 2)
def testpartial(self):
def odd(n):
"""
Return None for even numbers.
"""
if n % 2:
return n
self.assertEqual(1, bigrandom.map_choice(odd, 2**100) % 2)
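# Rough sketch of the contract the ChoiceTest cases above assume for
# map_choice(f, n); hypothetical pseudo-code, not nzmath's actual implementation:
#     while True:
#         value = f(some_random_integer_below(n))
#         if value is not None:
#             return value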
def suite(suffix="Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite())
| 32.662791 | 80 | 0.583838 |
79441890239767e34e3e7fb73b5ae643540904bd | 6,386 | py | Python | supplier/views.py | AntInso/GreaterWMS | 9eabb1b9b0f5376dcccd89ed86dd76995955a8ec | [
"Apache-2.0"
] | 1 | 2021-02-17T14:04:29.000Z | 2021-02-17T14:04:29.000Z | supplier/views.py | AntInso/GreaterWMS | 9eabb1b9b0f5376dcccd89ed86dd76995955a8ec | [
"Apache-2.0"
] | null | null | null | supplier/views.py | AntInso/GreaterWMS | 9eabb1b9b0f5376dcccd89ed86dd76995955a8ec | [
"Apache-2.0"
] | null | null | null | from rest_framework import viewsets
from .models import ListModel
from . import serializers
from utils.page import MyPageNumberPagination
from rest_framework.filters import OrderingFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.response import Response
from .filter import Filter
from rest_framework.exceptions import APIException
from .serializers import FileRenderSerializer
from django.http import StreamingHttpResponse
from .files import FileRenderCN, FileRenderEN
from rest_framework.settings import api_settings
class APIViewSet(viewsets.ModelViewSet):
"""
    retrieve:
        Respond with a single data record (get)
    list:
        Respond with a list of data records (all)
    create:
        Create a data record (post)
    delete:
        Delete a data record (delete)
    partial_update:
        Partially update a data record (patch: partial_update)
    update:
        Update a data record (put: update)
"""
queryset = ListModel.objects.all()
serializer_class = serializers.SupplierGetSerializer
pagination_class = MyPageNumberPagination
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
if id is None:
return self.queryset.filter(openid=self.request.auth.openid, is_delete=False)
else:
return self.queryset.filter(openid=self.request.auth.openid, id=id, is_delete=False)
else:
return self.queryset.none()
def get_serializer_class(self):
if self.action == 'list':
return serializers.SupplierGetSerializer
elif self.action == 'retrieve':
return serializers.SupplierGetSerializer
elif self.action == 'create':
return serializers.SupplierPostSerializer
elif self.action == 'update':
return serializers.SupplierUpdateSerializer
elif self.action == 'partial_update':
return serializers.SupplierPartialUpdateSerializer
elif self.action == 'destroy':
return serializers.SupplierGetSerializer
else:
return self.http_method_not_allowed(request=self.request)
def create(self, request, *args, **kwargs):
data = request.data
data['openid'] = request.auth.openid
if self.queryset.filter(openid=data['openid'], supplier_name=data['supplier_name'], is_delete=False).exists():
raise APIException({"detail": "Data exists"})
else:
serializer = self.get_serializer(data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def update(self, request, pk):
qs = self.get_object()
if qs.openid != request.auth.openid:
raise APIException({"detail": "Cannot update data which not yours"})
else:
data = request.data
serializer = self.get_serializer(qs, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def partial_update(self, request, pk):
qs = self.get_object()
if qs.openid != request.auth.openid:
raise APIException({"detail": "Cannot partial_update data which not yours"})
else:
data = request.data
serializer = self.get_serializer(qs, data=data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
def destroy(self, request, pk):
qs = self.get_object()
if qs.openid != request.auth.openid:
raise APIException({"detail": "Cannot delete data which not yours"})
else:
qs.is_delete = True
qs.save()
serializer = self.get_serializer(qs, many=False)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=200, headers=headers)
class FileDownloadView(viewsets.ModelViewSet):
queryset = ListModel.objects.all()
serializer_class = serializers.FileRenderSerializer
renderer_classes = (FileRenderCN, ) + tuple(api_settings.DEFAULT_RENDERER_CLASSES)
filter_backends = [DjangoFilterBackend, OrderingFilter, ]
ordering_fields = ['id', "create_time", "update_time", ]
filter_class = Filter
def get_project(self):
try:
id = self.kwargs.get('pk')
return id
except:
return None
def get_queryset(self):
id = self.get_project()
if self.request.user:
if id is None:
return self.queryset.filter(openid=self.request.auth.openid, is_delete=False)
else:
return self.queryset.filter(openid=self.request.auth.openid, id=id, is_delete=False)
else:
return self.queryset.none()
def get_serializer_class(self):
if self.action == 'list':
return serializers.FileRenderSerializer
else:
return self.http_method_not_allowed(request=self.request)
def list(self, request, *args, **kwargs):
from datetime import datetime
dt = datetime.now()
data = (
FileRenderSerializer(instance).data
for instance in self.get_queryset()
)
if self.request.GET.get('lang', '') == 'zh-hans':
renderer = FileRenderCN().render(data)
else:
renderer = FileRenderEN().render(data)
response = StreamingHttpResponse(
renderer,
content_type="text/csv"
)
response['Content-Disposition'] = "attachment; filename='supplier_{}.csv'".format(str(dt.strftime('%Y%m%d%H%M%S%f')))
return response
| 37.564706 | 125 | 0.638898 |
794418e81e9309e19899863cd7000b5f651591fd | 3,024 | py | Python | jes/jes-v5.020-linux/jes/python/jes/gui/explorers.py | utv-teaching/foundations-computer-science | 568e19fd83a3355dab2814229f335abf31bfd7e9 | [
"MIT"
] | null | null | null | jes/jes-v5.020-linux/jes/python/jes/gui/explorers.py | utv-teaching/foundations-computer-science | 568e19fd83a3355dab2814229f335abf31bfd7e9 | [
"MIT"
] | null | null | null | jes/jes-v5.020-linux/jes/python/jes/gui/explorers.py | utv-teaching/foundations-computer-science | 568e19fd83a3355dab2814229f335abf31bfd7e9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jes.gui.explorers
=================
This contains menu actions for launching the explorers.
:copyright: (C) 2014 Matthew Frazier and Mark Guzdial
:license: GNU GPL v2 or later, see jes/help/JESCopyright.txt for details
"""
import media
from javax.swing import JOptionPane
from jes.gui.components.actions import methodAction
class Explorers(object):
def __init__(self, parent, interpreter):
self.parentWindow = parent
self.interpreter = interpreter
self.actions = [
self.loadSoundTool,
self.loadPictureTool,
self.loadFrameSequencerTool
]
@methodAction(name="Sound Tool...")
def loadSoundTool(self):
"""
Examines the user namespace for sounds,
and presents a list for the user to pick from.
Then, the sound is opened in the sound tool.
"""
self._openExplorer("Sound Tool", media.openSoundTool,
"Sound", "sound", "sounds")
@methodAction(name="Picture Tool...")
def loadPictureTool(self):
"""
Examines the user namespace for pictures,
and presents a list for the user to pick from.
Then, the picture is opened in the picture tool.
"""
self._openExplorer("Picture Tool", media.openPictureTool,
"Picture", "picture", "pictures")
@methodAction(name="Movie Tool...")
def loadFrameSequencerTool(self):
"""
Examines the user namespace for movies,
and presents a list for the user to pick from.
Then, the movie is opened in the frame sequencer tool.
"""
self._openExplorer("Frame Sequencer Tool", media.openFrameSequencerTool,
"Movie", "movie", "movies")
###
### Internals
###
def _openExplorer(self, toolName, explorer, cls, singular, plural):
variables = self._findVariablesOfClass(cls)
if len(variables) > 0:
varname = self._showChoiceDialog(
"Open %s" % toolName, "Choose a %s to examine: " % singular,
variables.keys()
)
if varname is not None:
explorer(variables[varname])
else:
self._showErrorDialog(
"No %s" % plural, "There are no %s to examine." % plural
)
def _findVariablesOfClass(self, cls):
variables = {}
for name, obj in self.interpreter.namespace.items():
if getattr(type(obj), '__name__', None) == cls:
variables[name] = obj
return variables
def _showChoiceDialog(self, title, text, choices):
return JOptionPane.showInputDialog(
self.parentWindow, text, title, JOptionPane.INFORMATION_MESSAGE,
None, choices, choices[0]
)
def _showErrorDialog(self, title, text):
JOptionPane.showMessageDialog(
self.parentWindow, text, title, JOptionPane.ERROR_MESSAGE
)
| 31.831579 | 80 | 0.59623 |
794418f285825356842c85de08d0bfa9e42a69f8 | 688 | py | Python | setup.py | td2014/pkg_project | 58778613ceb818e09bdd09764fb918dd132edebe | [
"MIT"
] | null | null | null | setup.py | td2014/pkg_project | 58778613ceb818e09bdd09764fb918dd132edebe | [
"MIT"
] | null | null | null | setup.py | td2014/pkg_project | 58778613ceb818e09bdd09764fb918dd132edebe | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
# Needed to silence warnings (and to be a worthwhile package)
name='Testpkg',
url='https://github.com/td2014/pkg_project',
author='Anthony Daniell',
author_email='[email protected]',
# Needed to actually package something
packages=['testfuncpkg'],
# Needed for dependencies
install_requires=[''],
# *strongly* suggested for sharing
version='0.2',
# The license can be anything you like
license='MIT',
description='An example of a python package from pre-existing code',
# We will also need a readme eventually (there will be a warning)
long_description=open('README.txt').read(),
)
| 32.761905 | 72 | 0.693314 |
794418f41b4b9fbf3b17a8180aa363aec250ebe4 | 12,636 | py | Python | UMLRT2Kiltera_MM/Properties/Multiplicity/models/New1orMoreNamePart2_Complete_MDL.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 3 | 2017-06-02T19:26:27.000Z | 2021-06-14T04:25:45.000Z | UMLRT2Kiltera_MM/Properties/Multiplicity/models/New1orMoreNamePart2_Complete_MDL.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 8 | 2016-08-24T07:04:07.000Z | 2017-05-26T16:22:47.000Z | UMLRT2Kiltera_MM/Properties/Multiplicity/models/New1orMoreNamePart2_Complete_MDL.py | levilucio/SyVOLT | 7526ec794d21565e3efcc925a7b08ae8db27d46a | [
"MIT"
] | 1 | 2019-10-31T06:00:23.000Z | 2019-10-31T06:00:23.000Z | """
__New1orMoreNamePart2_Complete_MDL.py_____________________________________________________
Automatically generated AToM3 Model File (Do not modify directly)
Author: gehan
Modified: Mon Mar 2 13:56:28 2015
__________________________________________________________________________________________
"""
from stickylink import *
from widthXfillXdecoration import *
from LHS import *
from MT_pre__Name import *
from MT_pre__New import *
from MT_pre__directLink_T import *
from graph_MT_pre__directLink_T import *
from graph_LHS import *
from graph_MT_pre__New import *
from graph_MT_pre__Name import *
from ATOM3Enum import *
from ATOM3String import *
from ATOM3BottomType import *
from ATOM3Constraint import *
from ATOM3Attribute import *
from ATOM3Float import *
from ATOM3List import *
from ATOM3Link import *
from ATOM3Connection import *
from ATOM3Boolean import *
from ATOM3Appearance import *
from ATOM3Text import *
from ATOM3Action import *
from ATOM3Integer import *
from ATOM3Port import *
from ATOM3MSEnum import *
def New1orMoreNamePart2_Complete_MDL(self, rootNode, MT_pre__UMLRT2Kiltera_MMRootNode=None, MoTifRuleRootNode=None):
# --- Generating attributes code for ASG MT_pre__UMLRT2Kiltera_MM ---
if( MT_pre__UMLRT2Kiltera_MMRootNode ):
# author
MT_pre__UMLRT2Kiltera_MMRootNode.author.setValue('Annonymous')
# description
MT_pre__UMLRT2Kiltera_MMRootNode.description.setValue('\n')
MT_pre__UMLRT2Kiltera_MMRootNode.description.setHeight(15)
# name
MT_pre__UMLRT2Kiltera_MMRootNode.name.setValue('')
MT_pre__UMLRT2Kiltera_MMRootNode.name.setNone()
# --- ASG attributes over ---
# --- Generating attributes code for ASG MoTifRule ---
if( MoTifRuleRootNode ):
# author
MoTifRuleRootNode.author.setValue('Annonymous')
# description
MoTifRuleRootNode.description.setValue('\n')
MoTifRuleRootNode.description.setHeight(15)
# name
MoTifRuleRootNode.name.setValue('New1orMoreNamePart2_Complete')
# --- ASG attributes over ---
self.obj123=LHS(self)
self.obj123.isGraphObjectVisual = True
if(hasattr(self.obj123, '_setHierarchicalLink')):
self.obj123._setHierarchicalLink(False)
# constraint
self.obj123.constraint.setValue('#===============================================================================\n# This code is executed after the nodes in the LHS have been matched.\n# You can access a matched node labelled n by: PreNode(\'n\').\n# To access attribute x of node n, use: PreNode(\'n\')[\'x\'].\n# The given constraint must evaluate to a boolean expression:\n# returning True enables the rule to be applied,\n# returning False forbids the rule from being applied.\n#===============================================================================\n\nreturn True\n')
self.obj123.constraint.setHeight(15)
self.obj123.graphClass_= graph_LHS
if self.genGraphics:
new_obj = graph_LHS(20.0,40.0,self.obj123)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("LHS", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj123.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj123)
self.globalAndLocalPostcondition(self.obj123, rootNode)
self.obj123.postAction( rootNode.CREATE )
self.obj21147=MT_pre__Name(self)
self.obj21147.isGraphObjectVisual = True
if(hasattr(self.obj21147, '_setHierarchicalLink')):
self.obj21147._setHierarchicalLink(False)
# MT_pivotOut__
self.obj21147.MT_pivotOut__.setValue('')
self.obj21147.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj21147.MT_subtypeMatching__.setValue(('True', 0))
self.obj21147.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj21147.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj21147.MT_pre__classtype.setHeight(15)
# MT_pivotIn__
self.obj21147.MT_pivotIn__.setValue('')
self.obj21147.MT_pivotIn__.setNone()
# MT_label__
self.obj21147.MT_label__.setValue('2')
# MT_pre__cardinality
self.obj21147.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj21147.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj21147.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj21147.MT_pre__name.setHeight(15)
self.obj21147.graphClass_= graph_MT_pre__Name
if self.genGraphics:
new_obj = graph_MT_pre__Name(160.0,220.0,self.obj21147)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__Name", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj21147.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj21147)
self.globalAndLocalPostcondition(self.obj21147, rootNode)
self.obj21147.postAction( rootNode.CREATE )
self.obj124=MT_pre__New(self)
self.obj124.isGraphObjectVisual = True
if(hasattr(self.obj124, '_setHierarchicalLink')):
self.obj124._setHierarchicalLink(False)
# MT_label__
self.obj124.MT_label__.setValue('1')
# MT_pivotOut__
self.obj124.MT_pivotOut__.setValue('')
self.obj124.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj124.MT_subtypeMatching__.setValue(('True', 0))
self.obj124.MT_subtypeMatching__.config = 0
# MT_pre__classtype
self.obj124.MT_pre__classtype.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj124.MT_pre__classtype.setHeight(15)
# MT_pre__cardinality
self.obj124.MT_pre__cardinality.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj124.MT_pre__cardinality.setHeight(15)
# MT_pre__name
self.obj124.MT_pre__name.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj124.MT_pre__name.setHeight(15)
# MT_pivotIn__
self.obj124.MT_pivotIn__.setValue('element1')
self.obj124.graphClass_= graph_MT_pre__New
if self.genGraphics:
new_obj = graph_MT_pre__New(40.0,60.0,self.obj124)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__New", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
new_obj.layConstraints['scale'] = [1.0, 1.0]
else: new_obj = None
self.obj124.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj124)
self.globalAndLocalPostcondition(self.obj124, rootNode)
self.obj124.postAction( rootNode.CREATE )
self.obj21148=MT_pre__directLink_T(self)
self.obj21148.isGraphObjectVisual = True
if(hasattr(self.obj21148, '_setHierarchicalLink')):
self.obj21148._setHierarchicalLink(False)
# MT_label__
self.obj21148.MT_label__.setValue('3')
# MT_pivotOut__
self.obj21148.MT_pivotOut__.setValue('')
self.obj21148.MT_pivotOut__.setNone()
# MT_subtypeMatching__
self.obj21148.MT_subtypeMatching__.setValue(('True', 0))
self.obj21148.MT_subtypeMatching__.config = 0
# MT_pivotIn__
self.obj21148.MT_pivotIn__.setValue('')
self.obj21148.MT_pivotIn__.setNone()
# MT_pre__associationType
self.obj21148.MT_pre__associationType.setValue('\n#===============================================================================\n# This code is executed when evaluating if a node shall be matched by this rule.\n# You can access the value of the current node\'s attribute value by: attr_value.\n# You can access any attribute x of this node by: this[\'x\'].\n# If the constraint relies on attribute values from other nodes,\n# use the LHS/NAC constraint instead.\n# The given constraint must evaluate to a boolean expression.\n#===============================================================================\n\nreturn True\n')
self.obj21148.MT_pre__associationType.setHeight(15)
self.obj21148.graphClass_= graph_MT_pre__directLink_T
if self.genGraphics:
new_obj = graph_MT_pre__directLink_T(317.0,241.0,self.obj21148)
new_obj.DrawObject(self.UMLmodel)
self.UMLmodel.addtag_withtag("MT_pre__directLink_T", new_obj.tag)
new_obj.layConstraints = dict() # Graphical Layout Constraints
else: new_obj = None
self.obj21148.graphObject_ = new_obj
# Add node to the root: rootNode
rootNode.addNode(self.obj21148)
self.globalAndLocalPostcondition(self.obj21148, rootNode)
self.obj21148.postAction( rootNode.CREATE )
# Connections for obj123 (graphObject_: Obj0) of type LHS
self.drawConnections(
)
# Connections for obj21147 (graphObject_: Obj2) of type MT_pre__Name
self.drawConnections(
)
# Connections for obj124 (graphObject_: Obj1) of type MT_pre__New
self.drawConnections(
(self.obj124,self.obj21148,[257.0, 161.0, 317.0, 241.0],"true", 2) )
# Connections for obj21148 (graphObject_: Obj3) of type MT_pre__directLink_T
self.drawConnections(
(self.obj21148,self.obj21147,[317.0, 241.0, 377.0, 321.0],"true", 2) )
newfunction = New1orMoreNamePart2_Complete_MDL
loadedMMName = ['MT_pre__UMLRT2Kiltera_MM_META', 'MoTifRule_META']
atom3version = '0.3'
| 51.786885 | 632 | 0.670149 |
794418f9e3885c53d962d39ad98998d30c4940e9 | 5,203 | py | Python | litex_boards/targets/litefury.py | piotr-binkowski/litex-boards | bee71da7746c6fda0d4e1942452510e11f06c14a | [
"BSD-2-Clause"
] | 1 | 2022-02-20T00:06:32.000Z | 2022-02-20T00:06:32.000Z | litex_boards/targets/litefury.py | piotr-binkowski/litex-boards | bee71da7746c6fda0d4e1942452510e11f06c14a | [
"BSD-2-Clause"
] | null | null | null | litex_boards/targets/litefury.py | piotr-binkowski/litex-boards | bee71da7746c6fda0d4e1942452510e11f06c14a | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2018-2019 Rohit Singh <[email protected]>
# Copyright (c) 2019-2020 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import argparse
import sys
from migen import *
from litex_boards.platforms import litefury
from litex.soc.interconnect.csr import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.cores.clock import *
from litex.soc.cores.led import LedChaser
from litedram.modules import AS4C256M16D3A
from litedram.phy import s7ddrphy
from litepcie.phy.s7pciephy import S7PCIEPHY
from litepcie.software import generate_litepcie_software
# CRG ----------------------------------------------------------------------------------------------
class CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_idelay = ClockDomain()
# Clk/Rst
clk200 = platform.request("clk200")
# PLL
self.submodules.pll = pll = S7PLL()
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(clk200, 200e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
pll.create_clkout(self.cd_sys4x_dqs, 4*sys_clk_freq, phase=90)
pll.create_clkout(self.cd_idelay, 200e6)
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_idelay)
# BaseSoC -----------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(100e6), with_pcie=False, **kwargs):
platform = litefury.Platform()
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on LiteFury",
ident_version = True,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = CRG(platform, sys_clk_freq)
# DDR3 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = s7ddrphy.A7DDRPHY(platform.request("ddram"),
memtype = "DDR3",
nphases = 4,
sys_clk_freq = sys_clk_freq,
iodelay_clk_freq = 200e6)
self.add_csr("ddrphy")
self.add_sdram("sdram",
phy = self.ddrphy,
module = AS4C256M16D3A(sys_clk_freq, "1:4"),
origin = self.mem_map["main_ram"],
size = kwargs.get("max_sdram_size", 0x20000000),
l2_cache_size = kwargs.get("l2_size", 8192),
l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
l2_cache_reverse = True
)
# PCIe -------------------------------------------------------------------------------------
if with_pcie:
self.submodules.pcie_phy = S7PCIEPHY(platform, platform.request("pcie_x4"),
data_width = 64,
bar0_size = 0x20000)
self.add_csr("pcie_phy")
self.add_pcie(phy=self.pcie_phy, ndmas=1)
# Leds -------------------------------------------------------------------------------------
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
self.add_csr("leds")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Aller")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--sys-clk-freq", default=100e6, help="System clock frequency (default: 100MHz)")
parser.add_argument("--with-pcie", action="store_true", help="Enable PCIe support")
parser.add_argument("--driver", action="store_true", help="Generate LitePCIe driver")
builder_args(parser)
soc_sdram_args(parser)
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = int(float(args.sys_clk_freq)),
with_pcie = args.with_pcie,
**soc_sdram_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.driver:
generate_litepcie_software(soc, os.path.join(builder.output_dir, "driver"))
if __name__ == "__main__":
main()
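# Example invocation (flags taken from the argparse setup in main(); values
# are illustrative):
#     ./litefury.py --build --with-pcie --sys-clk-freq 100e6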
| 40.333333 | 128 | 0.543917 |
794418fe354c4cb248aacb86012294e3cd555102 | 1,407 | py | Python | test/SegOptRoadTest.py | YCaptain/MapWorld-pred | 12f35cd0744cabe1303321e0256b17967fe43da9 | [
"MIT"
] | null | null | null | test/SegOptRoadTest.py | YCaptain/MapWorld-pred | 12f35cd0744cabe1303321e0256b17967fe43da9 | [
"MIT"
] | null | null | null | test/SegOptRoadTest.py | YCaptain/MapWorld-pred | 12f35cd0744cabe1303321e0256b17967fe43da9 | [
"MIT"
] | null | null | null | import unittest
import os
import sys
import cv2
import numpy as np
from PIL import Image
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
src_dir = os.path.join(root_dir, "src")
sys.path.insert(0, src_dir)
# change cwd to root dir
os.chdir(root_dir)
from utils.seg_opt import SegmentOutputUtil
class SegOptTest(unittest.TestCase):
@classmethod
def setUpClass(self):
pred = SegmentOutputUtil.load_img("tmp/results/a00d13ba_Road-Deeplab_0.png")
w, h = pred.shape
self.meta = {
"w": w,
"h": h
}
self.util = SegmentOutputUtil(pred, self.meta, "Road")
self.pred = self.util.f_augment(pred)
def test_road_postprocess(self):
skel = self.pred
for i in range(2):
skel = self.util.get_skeleton(skel)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
for i in range(4):
skel = cv2.dilate(skel, kernel, iterations=5)
skel = SegmentOutputUtil.connect_line(skel, 10, 1)
mid_line = self.util.get_skeleton(skel)
img = np.zeros(skel.shape).astype(np.uint8)
img[skel == 255] = [50]
img[mid_line == 255] = [255]
alpha = Image.fromarray(skel)
img = Image.merge('LA', [Image.fromarray(img), alpha])
img.show()
if __name__ == '__main__':
unittest.main()
| 26.055556 | 84 | 0.617626 |
79441986de492436d6bfae8ef3918d3f3e660314 | 4,022 | py | Python | src/nsupdate/main/forms.py | mirzazulfan/nsupdate.info | fdd12e8f47d084969e23517fce4b8efa3212dd9e | [
"BSD-3-Clause"
] | 774 | 2015-01-01T23:24:50.000Z | 2022-03-29T01:40:41.000Z | src/nsupdate/main/forms.py | mirzazulfan/nsupdate.info | fdd12e8f47d084969e23517fce4b8efa3212dd9e | [
"BSD-3-Clause"
] | 272 | 2015-01-02T12:23:41.000Z | 2022-02-21T14:18:11.000Z | src/nsupdate/main/forms.py | mirzazulfan/nsupdate.info | fdd12e8f47d084969e23517fce4b8efa3212dd9e | [
"BSD-3-Clause"
] | 100 | 2015-03-05T15:11:09.000Z | 2022-03-09T18:39:39.000Z | # -*- coding: utf-8 -*-
"""
form definitions (which fields are available, order, autofocus, ...)
"""
import binascii
from django import forms
from django.utils.translation import ugettext_lazy as _
from .models import Host, RelatedHost, Domain, ServiceUpdaterHostConfig
from .dnstools import check_domain, NameServerNotAvailable
class CreateHostForm(forms.ModelForm):
class Meta(object):
model = Host
fields = ['name', 'domain', 'comment']
widgets = {
'name': forms.widgets.TextInput(attrs=dict(autofocus=None)),
}
class EditHostForm(forms.ModelForm):
class Meta(object):
model = Host
fields = ['comment', 'available', 'abuse', 'netmask_ipv4', 'netmask_ipv6']
netmask_ipv4 = forms.IntegerField(min_value=0, max_value=32)
netmask_ipv6 = forms.IntegerField(min_value=0, max_value=64)
class CreateRelatedHostForm(forms.ModelForm):
class Meta(object):
model = RelatedHost
fields = ['name', 'comment', 'available', 'interface_id_ipv4', 'interface_id_ipv6']
widgets = {
'name': forms.widgets.TextInput(attrs=dict(autofocus=None)),
}
class EditRelatedHostForm(forms.ModelForm):
class Meta(object):
model = RelatedHost
fields = ['name', 'comment', 'available', 'interface_id_ipv4', 'interface_id_ipv6']
class CreateDomainForm(forms.ModelForm):
def clean_nameserver_update_secret(self):
secret = self.cleaned_data['nameserver_update_secret']
try:
binascii.a2b_base64(secret.encode(encoding="ascii", errors="strict"))
except (binascii.Error, UnicodeEncodeError):
raise forms.ValidationError(_("Enter a valid secret in base64 format."), code='invalid')
return secret
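    # Hypothetical examples of the check above: "b3BlbnNlc2FtZQ==" is accepted
    # (valid base64), while a non-ASCII or badly padded value trips the
    # ValidationError raised in the except branch.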
class Meta(object):
model = Domain
fields = ['name', 'nameserver_ip', 'nameserver2_ip', 'nameserver_update_algorithm', 'comment']
widgets = {
'name': forms.widgets.TextInput(attrs=dict(autofocus=None)),
}
class EditDomainForm(forms.ModelForm):
def clean_nameserver_update_secret(self):
secret = self.cleaned_data['nameserver_update_secret']
try:
binascii.a2b_base64(secret.encode(encoding="ascii", errors="strict"))
except (binascii.Error, UnicodeEncodeError):
raise forms.ValidationError(_("Enter a valid secret in base64 format."), code='invalid')
return secret
def clean(self):
cleaned_data = super(EditDomainForm, self).clean()
if self.cleaned_data['available']:
try:
check_domain(self.instance.name)
except (NameServerNotAvailable, ):
raise forms.ValidationError(
_("Failed to add/delete host connectivity-test.%(domain)s, check your DNS server configuration. "
"This is a requirement for setting the available flag."),
code='invalid',
params={'domain': self.instance.name}
)
if cleaned_data['public'] and not cleaned_data['available']:
raise forms.ValidationError(
_("Domain must be available to be public"),
code='invalid')
class Meta(object):
model = Domain
fields = ['comment', 'nameserver_ip', 'nameserver2_ip', 'public', 'available',
'nameserver_update_algorithm', 'nameserver_update_secret']
class CreateUpdaterHostConfigForm(forms.ModelForm):
class Meta(object):
model = ServiceUpdaterHostConfig
fields = ['service', 'hostname', 'name', 'password',
'give_ipv4', 'give_ipv6', 'comment']
widgets = {
'hostname': forms.widgets.TextInput(attrs=dict(autofocus=None)),
}
class EditUpdaterHostConfigForm(forms.ModelForm):
class Meta(object):
model = ServiceUpdaterHostConfig
fields = ['hostname', 'comment', 'name', 'password',
'give_ipv4', 'give_ipv6']
| 34.973913 | 117 | 0.636748 |
79441b1e0bd62fbb44e5f4005e7d63ea5562bc7f | 4,381 | py | Python | test_2_spin/activityTest.py | NoOneZero/Neuro | a3cf1e2a701ee0096f093d332237dc30f8f83a50 | [
"Apache-2.0"
] | null | null | null | test_2_spin/activityTest.py | NoOneZero/Neuro | a3cf1e2a701ee0096f093d332237dc30f8f83a50 | [
"Apache-2.0"
] | null | null | null | test_2_spin/activityTest.py | NoOneZero/Neuro | a3cf1e2a701ee0096f093d332237dc30f8f83a50 | [
"Apache-2.0"
] | null | null | null | from common.activity import Activity
from common.dataWriter import DataWriter
from common.geneticAlgorithmParams import GeneticAlgorithmParams
from common.monitor import Monitor
from common.character import Character
from test_2_spin.personTest import PersonTest
from test_2_spin.characterTest import CharacterTest
from test_2_spin.geneticAlgorithmParamsTest import GeneticAlgorithmParamsTest
import datetime
class ActivityTest(Activity): # test2 spin
def __init__(self, monitor: Monitor = None,
genetic_algorithm_params: GeneticAlgorithmParams = None,
data_writer: DataWriter = None):
super(ActivityTest, self).__init__(monitor, genetic_algorithm_params, data_writer)
self.init(monitor, genetic_algorithm_params, data_writer)
def init(self, monitor: Monitor = None,
genetic_algorithm_params: GeneticAlgorithmParams = None,
data_writer: DataWriter = None):
self._system_init(monitor)
self._simulation_params_init(genetic_algorithm_params)
self._character_init()
self._enemy_init()
self._environment_init()
self._data_writer_init(data_writer)
def _system_init(self, monitor: Monitor = None) -> None:
self.monitor = monitor or Monitor()
def _simulation_params_init(self, genetic_algorithm_params: GeneticAlgorithmParams = None) -> None:
self.epoch = 0
self.iteration = 0
self.is_epoch_work: bool = True
self.is_iteration_work: bool = True
self.start_time = datetime.datetime.now()
self.genetic_algorithm_params = genetic_algorithm_params or GeneticAlgorithmParams()
def _character_init(self) -> None:
self.character = []
for i in range(self.genetic_algorithm_params._start_population):
self.character.append(Character())
def _enemy_init(self) -> None: pass
def _environment_init(self) -> None: pass
def _data_writer_init(self, data_writer: DataWriter = None) -> None:
self.data_writer = data_writer or DataWriter()
def loop(self) -> None:
while self.is_epoch_work:
self._run_iteration_cycle()
self._calculate_fitness_function_epoch()
self._save_character_data()
self._create_new_population()
self._check_loop_epoch_condition()
def _run_iteration_cycle(self) -> None:
while self.is_iteration_work:
self._control_input()
self._move_environment()
self._collide_environment()
self._move_enemies()
self._collide_enemies()
self._kill_enemies()
self._move_character()
self._collide_character()
self._kill_character()
self._calculate_fitness_function_iteration()
self._draw_all()
self._write_data_on_screen()
self._check_loop_iteration_condition()
def _control_input(self): self.monitor.control_input()
def _move_environment(self): pass
def _collide_environment(self): pass
def _move_enemies(self): pass
def _collide_enemies(self): pass
def _kill_enemies(self): pass
def _move_character(self): pass
def _collide_character(self): pass
def _kill_character(self): pass
def _calculate_fitness_function_iteration(self): pass
def _draw_all(self): self.monitor.draw(environment=[], enemies=[], character=self.character)
def _write_data_on_screen(self):
self.monitor.write_data_on_screen("Test e:{}, i:{}".format(self.epoch, self.iteration))
def _check_loop_iteration_condition(self):
self.iteration += 1
if self.iteration >= self.genetic_algorithm_params.get_max_iteration():
self.is_iteration_work = False
def _calculate_fitness_function_epoch(self): pass
def _save_character_data(self) -> None:pass
def _create_new_population(self) -> None: pass
def _check_loop_epoch_condition(self) -> None:
self._count_epoch()
self._check_stop_epoch_condition()
def _count_epoch(self) -> None:
self.epoch += 1
def _check_stop_epoch_condition(self) -> None:
if self.epoch >= self.genetic_algorithm_params.get_max_epoch():
self.is_epoch_work = False
else:
self.is_iteration_work = True
self.iteration = 0
| 34.769841 | 103 | 0.692079 |
79441b232333dac519c67804b8e816b2608ecf04 | 691 | py | Python | var/spack/repos/builtin/packages/r-splancs/package.py | jameshclrk/spack | 1f8fcb36091e1d5ae63a2279a958ca3ff57088bf | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2021-03-19T13:12:47.000Z | 2021-03-19T13:12:47.000Z | var/spack/repos/builtin/packages/r-splancs/package.py | jameshclrk/spack | 1f8fcb36091e1d5ae63a2279a958ca3ff57088bf | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/r-splancs/package.py | jameshclrk/spack | 1f8fcb36091e1d5ae63a2279a958ca3ff57088bf | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSplancs(RPackage):
"""Spatial and Space-Time Point Pattern Analysis"""
homepage = "https://cran.r-project.org/web/packages/splancs/index.html"
url = "https://cran.r-project.org/src/contrib/splancs_2.01-40.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/splancs"
version('2.01-40', 'dc08a5c9a1fd2098d78459152f4917ce')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| 34.55 | 78 | 0.700434 |
79441bab58a27e0ddf9b50eea20b4b45964154a9 | 738 | py | Python | molsysmt/item/string_pdb_id/to_file_msmpk.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/item/string_pdb_id/to_file_msmpk.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | molsysmt/item/string_pdb_id/to_file_msmpk.py | uibcdf/MolModMTs | 4f6b6f671a9fa3e73008d1e9c48686d5f20a6573 | [
"MIT"
] | null | null | null | from molsysmt._private.exceptions import *
from molsysmt._private.digestion import *
def to_file_msmpk(item, atom_indices='all', structure_indices='all', output_filename=None, check=True):
if check:
digest_item(item, 'string:pdb_id')
atom_indices = digest_atom_indices(atom_indices)
structure_indices = digest_structure_indices(structure_indices)
from . import to_molsysmt_MolSys
from ..molsysmt_MolSys import to_file_msmpk as molsysmt_MolSys_to_file_msmpk
tmp_item = to_molsysmt_MolSys(item, atom_indices=atom_indices, structure_indices=structure_indices, check=False)
tmp_item = molsysmt_MolSys_to_file_msmpk(tmp_item, output_filename=output_filename, check=False)
return tmp_item
| 36.9 | 116 | 0.789973 |
79441c96878079111a3e18f23aba2127ced1c309 | 45,590 | py | Python | discord/channel.py | LetsChill/discord.py | 7e373441fc8502f30e60272737c0074d424373d0 | [
"MIT"
] | null | null | null | discord/channel.py | LetsChill/discord.py | 7e373441fc8502f30e60272737c0074d424373d0 | [
"MIT"
] | null | null | null | discord/channel.py | LetsChill/discord.py | 7e373441fc8502f30e60272737c0074d424373d0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import time
import asyncio
import discord.abc
from .permissions import Permissions
from .enums import ChannelType, try_enum, VoiceRegion
from .mixins import Hashable
from . import utils
from .asset import Asset
from .errors import ClientException, NoMoreItems, InvalidArgument
__all__ = (
'TextChannel',
'VoiceChannel',
'DMChannel',
'CategoryChannel',
'StoreChannel',
'GroupChannel',
'_channel_factory',
)
async def _single_delete_strategy(messages):
for m in messages:
await m.delete()
class TextChannel(discord.abc.Messageable, discord.abc.GuildChannel, Hashable):
"""Represents a Discord guild text channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns the channel's name.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`Guild`
The guild the channel belongs to.
id: :class:`int`
The channel ID.
category_id: Optional[:class:`int`]
The category channel ID this channel belongs to, if applicable.
topic: Optional[:class:`str`]
The channel's topic. ``None`` if it doesn't exist.
position: :class:`int`
The position in the channel list. This is a number that starts at 0. e.g. the
top channel is position 0.
last_message_id: Optional[:class:`int`]
The last message ID of the message sent to this channel. It may
*not* point to an existing or valid message.
slowmode_delay: :class:`int`
The number of seconds a member must wait between sending messages
in this channel. A value of `0` denotes that it is disabled.
Bots and users with :attr:`~Permissions.manage_channels` or
:attr:`~Permissions.manage_messages` bypass slowmode.
"""
__slots__ = ('name', 'id', 'guild', 'topic', '_state', 'nsfw',
'category_id', 'position', 'slowmode_delay', '_overwrites',
'_type', 'last_message_id')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._type = data['type']
self._update(guild, data)
def __repr__(self):
attrs = [
('id', self.id),
('name', self.name),
('position', self.position),
('nsfw', self.nsfw),
('news', self.is_news()),
('category_id', self.category_id)
]
return '<%s %s>' % (self.__class__.__name__, ' '.join('%s=%r' % t for t in attrs))
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.topic = data.get('topic')
self.position = data['position']
self.nsfw = data.get('nsfw', False)
# Does this need coercion into `int`? No idea yet.
self.slowmode_delay = data.get('rate_limit_per_user', 0)
self._type = data.get('type', self._type)
self.last_message_id = utils._get_as_snowflake(data, 'last_message_id')
self._fill_overwrites(data)
async def _get_channel(self):
return self
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return try_enum(ChannelType, self._type)
@property
def _sorting_bucket(self):
return ChannelType.text.value
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
# text channels do not have voice related permissions
denied = Permissions.voice()
base.value &= ~denied.value
return base
@property
def members(self):
"""List[:class:`Member`]: Returns all members that can see this channel."""
return [m for m in self.guild.members if self.permissions_for(m).read_messages]
def is_nsfw(self):
""":class:`bool`: Checks if the channel is NSFW."""
return self.nsfw
def is_news(self):
""":class:`bool`: Checks if the channel is a news channel."""
return self._type == ChannelType.news.value
@property
def last_message(self):
"""Fetches the last message from this channel in cache.
The message might not be valid or point to an existing message.
.. admonition:: Reliable Fetching
:class: helpful
For a slightly more reliable method of fetching the
last message, consider using either :meth:`history`
or :meth:`fetch_message` with the :attr:`last_message_id`
attribute.
Returns
---------
Optional[:class:`Message`]
The last message in this channel or ``None`` if not found.
"""
return self._state._get_message(self.last_message_id) if self.last_message_id else None
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
.. versionchanged:: 1.3
The ``overwrites`` keyword-only parameter was added.
.. versionchanged:: 1.4
The ``type`` keyword-only parameter was added.
Parameters
----------
name: :class:`str`
The new channel name.
topic: :class:`str`
The new channel's topic.
position: :class:`int`
The new channel's position.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
sync_permissions: :class:`bool`
Whether to sync permissions with the channel's new or pre-existing
category. Defaults to ``False``.
category: Optional[:class:`CategoryChannel`]
The new category for this channel. Can be ``None`` to remove the
category.
slowmode_delay: :class:`int`
Specifies the slowmode rate limit for user in this channel, in seconds.
A value of `0` disables slowmode. The maximum value possible is `21600`.
type: :class:`ChannelType`
Change the type of this text channel. Currently, only conversion between
:attr:`ChannelType.text` and :attr:`ChannelType.news` is supported. This
is only available to guilds that contain ``NEWS`` in :attr:`Guild.features`.
reason: Optional[:class:`str`]
The reason for editing this channel. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
Raises
------
InvalidArgument
If position is less than 0 or greater than the number of channels, or if
the permission overwrite information is not in proper form.
Forbidden
You do not have permissions to edit the channel.
HTTPException
Editing the channel failed.
"""
await self._edit(options, reason=reason)
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'topic': self.topic,
'nsfw': self.nsfw,
'rate_limit_per_user': self.slowmode_delay
}, name=name, reason=reason)
async def delete_messages(self, messages):
"""|coro|
Deletes a list of messages. This is similar to :meth:`Message.delete`
except it bulk deletes multiple messages.
As a special case, if the number of messages is 0, then nothing
is done. If the number of messages is 1 then single message
delete is done. If it's more than two, then bulk delete is used.
You cannot bulk delete more than 100 messages or messages that
are older than 14 days old.
You must have the :attr:`~Permissions.manage_messages` permission to
use this.
Usable only by bot accounts.
Parameters
-----------
messages: Iterable[:class:`abc.Snowflake`]
An iterable of messages denoting which ones to bulk delete.
Raises
------
ClientException
The number of messages to delete was more than 100.
Forbidden
You do not have proper permissions to delete the messages or
you're not using a bot account.
NotFound
If single delete, then the message was already deleted.
HTTPException
Deleting the messages failed.
"""
if not isinstance(messages, (list, tuple)):
messages = list(messages)
if len(messages) == 0:
return # do nothing
if len(messages) == 1:
message_id = messages[0].id
await self._state.http.delete_message(self.id, message_id)
return
if len(messages) > 100:
raise ClientException('Can only bulk delete messages up to 100 messages')
message_ids = [m.id for m in messages]
await self._state.http.delete_messages(self.id, message_ids)
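    # Editor's note (illustrative sketch, not part of the original library): a
    # hedged example of bulk deletion; fetching the last 50 messages first is an
    # assumption made for the example.
    #
    #   messages = await channel.history(limit=50).flatten()
    #   await channel.delete_messages(messages)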
async def purge(self, *, limit=100, check=None, before=None, after=None, around=None, oldest_first=False, bulk=True):
"""|coro|
Purges a list of messages that meet the criteria given by the predicate
``check``. If a ``check`` is not provided then all messages are deleted
without discrimination.
You must have the :attr:`~Permissions.manage_messages` permission to
delete messages even if they are your own (unless you are a user
account). The :attr:`~Permissions.read_message_history` permission is
also needed to retrieve message history.
Internally, this employs a different number of strategies depending
on the conditions met such as if a bulk delete is possible or if
the account is a user bot or not.
Examples
---------
Deleting bot's messages ::
def is_me(m):
return m.author == client.user
deleted = await channel.purge(limit=100, check=is_me)
await channel.send('Deleted {} message(s)'.format(len(deleted)))
Parameters
-----------
limit: Optional[:class:`int`]
The number of messages to search through. This is not the number
of messages that will be deleted, though it can be.
check: Callable[[:class:`Message`], :class:`bool`]
The function used to check if a message should be deleted.
It must take a :class:`Message` as its sole parameter.
before: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Same as ``before`` in :meth:`history`.
after: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Same as ``after`` in :meth:`history`.
around: Optional[Union[:class:`abc.Snowflake`, :class:`datetime.datetime`]]
Same as ``around`` in :meth:`history`.
oldest_first: Optional[:class:`bool`]
Same as ``oldest_first`` in :meth:`history`.
bulk: :class:`bool`
If ``True``, use bulk delete. Setting this to ``False`` is useful for mass-deleting
a bot's own messages without :attr:`Permissions.manage_messages`. When ``True``, will
fall back to single delete if current account is a user bot (now deprecated), or if messages are
older than two weeks.
Raises
-------
Forbidden
You do not have proper permissions to do the actions required.
HTTPException
Purging the messages failed.
Returns
--------
List[:class:`.Message`]
The list of messages that were deleted.
"""
if check is None:
check = lambda m: True
iterator = self.history(limit=limit, before=before, after=after, oldest_first=oldest_first, around=around)
ret = []
count = 0
minimum_time = int((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
strategy = self.delete_messages if self._state.is_bot and bulk else _single_delete_strategy
while True:
try:
msg = await iterator.next()
except NoMoreItems:
# no more messages to poll
if count >= 2:
# more than 2 messages -> bulk delete
to_delete = ret[-count:]
await strategy(to_delete)
elif count == 1:
# delete a single message
await ret[-1].delete()
return ret
else:
if count == 100:
# we've reached a full 'queue'
to_delete = ret[-100:]
await strategy(to_delete)
count = 0
await asyncio.sleep(1)
if check(msg):
if msg.id < minimum_time:
# older than 14 days old
if count == 1:
await ret[-1].delete()
elif count >= 2:
to_delete = ret[-count:]
await strategy(to_delete)
count = 0
strategy = _single_delete_strategy
count += 1
ret.append(msg)
async def webhooks(self):
"""|coro|
Gets the list of webhooks from this channel.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
Raises
-------
Forbidden
You don't have permissions to get the webhooks.
Returns
--------
List[:class:`Webhook`]
The webhooks for this channel.
"""
from .webhook import Webhook
data = await self._state.http.channel_webhooks(self.id)
return [Webhook.from_state(d, state=self._state) for d in data]
async def create_webhook(self, *, name, avatar=None, reason=None):
"""|coro|
Creates a webhook for this channel.
Requires :attr:`~.Permissions.manage_webhooks` permissions.
.. versionchanged:: 1.1
Added the ``reason`` keyword-only parameter.
Parameters
-------------
name: :class:`str`
The webhook's name.
avatar: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the webhook's default avatar.
This operates similarly to :meth:`~ClientUser.edit`.
reason: Optional[:class:`str`]
The reason for creating this webhook. Shows up in the audit logs.
Raises
-------
HTTPException
Creating the webhook failed.
Forbidden
You do not have permissions to create a webhook.
Returns
--------
:class:`Webhook`
The created webhook.
"""
from .webhook import Webhook
if avatar is not None:
avatar = utils._bytes_to_base64_data(avatar)
data = await self._state.http.create_webhook(self.id, name=str(name), avatar=avatar, reason=reason)
return Webhook.from_state(data, state=self._state)
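    # Editor's note (illustrative, not from the original source): creating a
    # webhook and posting through it; the file name 'avatar.png' and the webhook
    # name are assumptions for the example.
    #
    #   with open('avatar.png', 'rb') as fp:
    #       hook = await channel.create_webhook(name='status', avatar=fp.read())
    #   await hook.send('webhook is live')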
async def follow(self, *, destination, reason=None):
"""
Follows a channel using a webhook.
Only news channels can be followed.
.. note::
The webhook returned will not provide a token to do webhook
actions, as Discord does not provide it.
.. versionadded:: 1.3
Parameters
-----------
destination: :class:`TextChannel`
The channel you would like to follow from.
reason: Optional[:class:`str`]
The reason for following the channel. Shows up on the destination guild's audit log.
.. versionadded:: 1.4
Raises
-------
HTTPException
Following the channel failed.
Forbidden
You do not have the permissions to create a webhook.
Returns
--------
:class:`Webhook`
The created webhook.
"""
if not self.is_news():
raise ClientException('The channel must be a news channel.')
if not isinstance(destination, TextChannel):
raise InvalidArgument('Expected TextChannel received {0.__name__}'.format(type(destination)))
from .webhook import Webhook
data = await self._state.http.follow_webhook(self.id, webhook_channel_id=destination.id, reason=reason)
return Webhook._as_follower(data, channel=destination, user=self._state.user)
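    # Editor's note (hedged sketch, not part of the library): cross-posting a news
    # channel into another channel the bot can manage; `news_channel` and `target`
    # are placeholders.
    #
    #   await news_channel.follow(destination=target, reason='cross-post updates')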
def get_partial_message(self, message_id):
"""Creates a :class:`PartialMessage` from the message ID.
This is useful if you want to work with a message and only have its ID without
doing an unnecessary API call.
.. versionadded:: 1.6
Parameters
------------
message_id: :class:`int`
The message ID to create a partial message for.
Returns
---------
:class:`PartialMessage`
The partial message.
"""
from .message import PartialMessage
return PartialMessage(channel=self, id=message_id)
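    # Editor's note (illustrative only): acting on a message by ID without
    # fetching it first; the numeric ID below is a made-up placeholder.
    #
    #   partial = channel.get_partial_message(123456789012345678)
    #   await partial.delete()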
class VoiceChannel(discord.abc.Connectable, discord.abc.GuildChannel, Hashable):
"""Represents a Discord guild voice channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns the channel's name.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`Guild`
The guild the channel belongs to.
id: :class:`int`
The channel ID.
category_id: Optional[:class:`int`]
The category channel ID this channel belongs to, if applicable.
position: :class:`int`
The position in the channel list. This is a number that starts at 0. e.g. the
top channel is position 0.
bitrate: :class:`int`
The channel's preferred audio bitrate in bits per second.
user_limit: :class:`int`
The channel's limit for number of members that can be in a voice channel.
rtc_region: Optional[:class:`VoiceRegion`]
The region for the voice channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
.. versionadded:: 1.7
"""
__slots__ = ('name', 'id', 'guild', 'bitrate', 'user_limit',
'_state', 'position', '_overwrites', 'category_id',
'rtc_region')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def __repr__(self):
attrs = [
('id', self.id),
('name', self.name),
('rtc_region', self.rtc_region),
('position', self.position),
('bitrate', self.bitrate),
('user_limit', self.user_limit),
('category_id', self.category_id)
]
return '<%s %s>' % (self.__class__.__name__, ' '.join('%s=%r' % t for t in attrs))
def _get_voice_client_key(self):
return self.guild.id, 'guild_id'
def _get_voice_state_pair(self):
return self.guild.id, self.id
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.voice
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.rtc_region = data.get('rtc_region')
if self.rtc_region:
self.rtc_region = try_enum(VoiceRegion, self.rtc_region)
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.position = data['position']
self.bitrate = data.get('bitrate')
self.user_limit = data.get('user_limit')
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.voice.value
@property
def members(self):
"""List[:class:`Member`]: Returns all members that are currently inside this voice channel."""
ret = []
for user_id, state in self.guild._voice_states.items():
if state.channel.id == self.id:
member = self.guild.get_member(user_id)
if member is not None:
ret.append(member)
return ret
@property
def voice_states(self):
"""Returns a mapping of member IDs who have voice states in this channel.
.. versionadded:: 1.3
.. note::
This function is intentionally low level to replace :attr:`members`
when the member cache is unavailable.
Returns
--------
Mapping[:class:`int`, :class:`VoiceState`]
The mapping of member ID to a voice state.
"""
return {key: value for key, value in self.guild._voice_states.items() if value.channel.id == self.id}
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
# voice channels cannot be edited by people who can't connect to them
# It also implicitly denies all other voice perms
if not base.connect:
denied = Permissions.voice()
denied.update(manage_channels=True, manage_roles=True)
base.value &= ~denied.value
return base
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'bitrate': self.bitrate,
'user_limit': self.user_limit
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
.. versionchanged:: 1.3
The ``overwrites`` keyword-only parameter was added.
Parameters
----------
name: :class:`str`
The new channel's name.
bitrate: :class:`int`
The new channel's bitrate.
user_limit: :class:`int`
The new channel's user limit.
position: :class:`int`
The new channel's position.
sync_permissions: :class:`bool`
Whether to sync permissions with the channel's new or pre-existing
category. Defaults to ``False``.
category: Optional[:class:`CategoryChannel`]
The new category for this channel. Can be ``None`` to remove the
category.
reason: Optional[:class:`str`]
The reason for editing this channel. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
rtc_region: Optional[:class:`VoiceRegion`]
The new region for the voice channel's voice communication.
A value of ``None`` indicates automatic voice region detection.
.. versionadded:: 1.7
Raises
------
InvalidArgument
If the permission overwrite information is not in proper form.
Forbidden
You do not have permissions to edit the channel.
HTTPException
Editing the channel failed.
"""
await self._edit(options, reason=reason)
class CategoryChannel(discord.abc.GuildChannel, Hashable):
"""Represents a Discord channel category.
These are useful to group channels to logical compartments.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the category's hash.
.. describe:: str(x)
Returns the category's name.
Attributes
-----------
name: :class:`str`
The category name.
guild: :class:`Guild`
The guild the category belongs to.
id: :class:`int`
The category channel ID.
position: :class:`int`
The position in the category list. This is a number that starts at 0. e.g. the
top category is position 0.
"""
__slots__ = ('name', 'id', 'guild', 'nsfw', '_state', 'position', '_overwrites', 'category_id')
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def __repr__(self):
return '<CategoryChannel id={0.id} name={0.name!r} position={0.position} nsfw={0.nsfw}>'.format(self)
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.nsfw = data.get('nsfw', False)
self.position = data['position']
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.category.value
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.category
def is_nsfw(self):
""":class:`bool`: Checks if the category is NSFW."""
return self.nsfw
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'nsfw': self.nsfw
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
.. versionchanged:: 1.3
The ``overwrites`` keyword-only parameter was added.
Parameters
----------
name: :class:`str`
The new category's name.
position: :class:`int`
The new category's position.
nsfw: :class:`bool`
To mark the category as NSFW or not.
reason: Optional[:class:`str`]
The reason for editing this category. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
Raises
------
InvalidArgument
If position is less than 0 or greater than the number of categories.
Forbidden
You do not have permissions to edit the category.
HTTPException
Editing the category failed.
"""
await self._edit(options=options, reason=reason)
@utils.copy_doc(discord.abc.GuildChannel.move)
async def move(self, **kwargs):
kwargs.pop('category', None)
await super().move(**kwargs)
@property
def channels(self):
"""List[:class:`abc.GuildChannel`]: Returns the channels that are under this category.
These are sorted by the official Discord UI, which places voice channels below the text channels.
"""
def comparator(channel):
return (not isinstance(channel, TextChannel), channel.position)
ret = [c for c in self.guild.channels if c.category_id == self.id]
ret.sort(key=comparator)
return ret
@property
def text_channels(self):
"""List[:class:`TextChannel`]: Returns the text channels that are under this category."""
ret = [c for c in self.guild.channels
if c.category_id == self.id
and isinstance(c, TextChannel)]
ret.sort(key=lambda c: (c.position, c.id))
return ret
@property
def voice_channels(self):
"""List[:class:`VoiceChannel`]: Returns the voice channels that are under this category."""
ret = [c for c in self.guild.channels
if c.category_id == self.id
and isinstance(c, VoiceChannel)]
ret.sort(key=lambda c: (c.position, c.id))
return ret
async def create_text_channel(self, name, *, overwrites=None, reason=None, **options):
"""|coro|
A shortcut method to :meth:`Guild.create_text_channel` to create a :class:`TextChannel` in the category.
Returns
-------
:class:`TextChannel`
The channel that was just created.
"""
return await self.guild.create_text_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
async def create_voice_channel(self, name, *, overwrites=None, reason=None, **options):
"""|coro|
A shortcut method to :meth:`Guild.create_voice_channel` to create a :class:`VoiceChannel` in the category.
Returns
-------
:class:`VoiceChannel`
The channel that was just created.
"""
return await self.guild.create_voice_channel(name, overwrites=overwrites, category=self, reason=reason, **options)
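    # Editor's note (sketch, not original code): grouping freshly created channels
    # under a category; the guild object and channel names are assumptions.
    #
    #   category = await guild.create_category('projects')
    #   text = await category.create_text_channel('general')
    #   voice = await category.create_voice_channel('standup', user_limit=5)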
class StoreChannel(discord.abc.GuildChannel, Hashable):
"""Represents a Discord guild store channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns the channel's name.
Attributes
-----------
name: :class:`str`
The channel name.
guild: :class:`Guild`
The guild the channel belongs to.
id: :class:`int`
The channel ID.
category_id: :class:`int`
The category channel ID this channel belongs to.
position: :class:`int`
The position in the channel list. This is a number that starts at 0. e.g. the
top channel is position 0.
"""
__slots__ = ('name', 'id', 'guild', '_state', 'nsfw',
'category_id', 'position', '_overwrites',)
def __init__(self, *, state, guild, data):
self._state = state
self.id = int(data['id'])
self._update(guild, data)
def __repr__(self):
return '<StoreChannel id={0.id} name={0.name!r} position={0.position} nsfw={0.nsfw}>'.format(self)
def _update(self, guild, data):
self.guild = guild
self.name = data['name']
self.category_id = utils._get_as_snowflake(data, 'parent_id')
self.position = data['position']
self.nsfw = data.get('nsfw', False)
self._fill_overwrites(data)
@property
def _sorting_bucket(self):
return ChannelType.text.value
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.store
@utils.copy_doc(discord.abc.GuildChannel.permissions_for)
def permissions_for(self, member):
base = super().permissions_for(member)
# store channels do not have voice related permissions
denied = Permissions.voice()
base.value &= ~denied.value
return base
def is_nsfw(self):
""":class:`bool`: Checks if the channel is NSFW."""
return self.nsfw
@utils.copy_doc(discord.abc.GuildChannel.clone)
async def clone(self, *, name=None, reason=None):
return await self._clone_impl({
'nsfw': self.nsfw
}, name=name, reason=reason)
async def edit(self, *, reason=None, **options):
"""|coro|
Edits the channel.
You must have the :attr:`~Permissions.manage_channels` permission to
use this.
Parameters
----------
name: :class:`str`
The new channel name.
position: :class:`int`
The new channel's position.
nsfw: :class:`bool`
To mark the channel as NSFW or not.
sync_permissions: :class:`bool`
Whether to sync permissions with the channel's new or pre-existing
category. Defaults to ``False``.
category: Optional[:class:`CategoryChannel`]
The new category for this channel. Can be ``None`` to remove the
category.
reason: Optional[:class:`str`]
The reason for editing this channel. Shows up on the audit log.
overwrites: :class:`dict`
A :class:`dict` of target (either a role or a member) to
:class:`PermissionOverwrite` to apply to the channel.
.. versionadded:: 1.3
Raises
------
InvalidArgument
If position is less than 0 or greater than the number of channels, or if
the permission overwrite information is not in proper form.
Forbidden
You do not have permissions to edit the channel.
HTTPException
Editing the channel failed.
"""
await self._edit(options, reason=reason)
class DMChannel(discord.abc.Messageable, Hashable):
"""Represents a Discord direct message channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns a string representation of the channel
Attributes
----------
recipient: :class:`User`
The user you are participating with in the direct message channel.
me: :class:`ClientUser`
The user presenting yourself.
id: :class:`int`
The direct message channel ID.
"""
__slots__ = ('id', 'recipient', 'me', '_state')
def __init__(self, *, me, state, data):
self._state = state
self.recipient = state.store_user(data['recipients'][0])
self.me = me
self.id = int(data['id'])
async def _get_channel(self):
return self
def __str__(self):
return 'Direct Message with %s' % self.recipient
def __repr__(self):
return '<DMChannel id={0.id} recipient={0.recipient!r}>'.format(self)
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.private
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the direct message channel's creation time in UTC."""
return utils.snowflake_time(self.id)
def permissions_for(self, user=None):
"""Handles permission resolution for a :class:`User`.
This function is there for compatibility with other channel types.
Actual direct messages do not really have the concept of permissions.
This returns all the Text related permissions set to ``True`` except:
- :attr:`~Permissions.send_tts_messages`: You cannot send TTS messages in a DM.
        - :attr:`~Permissions.manage_messages`: You cannot delete others' messages in a DM.
Parameters
-----------
user: :class:`User`
The user to check permissions for. This parameter is ignored
but kept for compatibility.
Returns
--------
:class:`Permissions`
The resolved permissions.
"""
base = Permissions.text()
base.send_tts_messages = False
base.manage_messages = False
return base
def get_partial_message(self, message_id):
"""Creates a :class:`PartialMessage` from the message ID.
This is useful if you want to work with a message and only have its ID without
doing an unnecessary API call.
.. versionadded:: 1.6
Parameters
------------
message_id: :class:`int`
The message ID to create a partial message for.
Returns
---------
:class:`PartialMessage`
The partial message.
"""
from .message import PartialMessage
return PartialMessage(channel=self, id=message_id)
class GroupChannel(discord.abc.Messageable, Hashable):
"""Represents a Discord group channel.
.. container:: operations
.. describe:: x == y
Checks if two channels are equal.
.. describe:: x != y
Checks if two channels are not equal.
.. describe:: hash(x)
Returns the channel's hash.
.. describe:: str(x)
Returns a string representation of the channel
Attributes
----------
recipients: List[:class:`User`]
The users you are participating with in the group channel.
me: :class:`ClientUser`
The user presenting yourself.
id: :class:`int`
The group channel ID.
owner: :class:`User`
The user that owns the group channel.
icon: Optional[:class:`str`]
The group channel's icon hash if provided.
name: Optional[:class:`str`]
The group channel's name if provided.
"""
__slots__ = ('id', 'recipients', 'owner', 'icon', 'name', 'me', '_state')
def __init__(self, *, me, state, data):
self._state = state
self.id = int(data['id'])
self.me = me
self._update_group(data)
def _update_group(self, data):
owner_id = utils._get_as_snowflake(data, 'owner_id')
self.icon = data.get('icon')
self.name = data.get('name')
try:
self.recipients = [self._state.store_user(u) for u in data['recipients']]
except KeyError:
pass
if owner_id == self.me.id:
self.owner = self.me
else:
self.owner = utils.find(lambda u: u.id == owner_id, self.recipients)
async def _get_channel(self):
return self
def __str__(self):
if self.name:
return self.name
if len(self.recipients) == 0:
return 'Unnamed'
return ', '.join(map(lambda x: x.name, self.recipients))
def __repr__(self):
return '<GroupChannel id={0.id} name={0.name!r}>'.format(self)
@property
def type(self):
""":class:`ChannelType`: The channel's Discord type."""
return ChannelType.group
@property
def icon_url(self):
""":class:`Asset`: Returns the channel's icon asset if available.
This is equivalent to calling :meth:`icon_url_as` with
the default parameters ('webp' format and a size of 1024).
"""
return self.icon_url_as()
def icon_url_as(self, *, format='webp', size=1024):
"""Returns an :class:`Asset` for the icon the channel has.
The format must be one of 'webp', 'jpeg', 'jpg' or 'png'.
The size must be a power of 2 between 16 and 4096.
.. versionadded:: 2.0
Parameters
-----------
format: :class:`str`
The format to attempt to convert the icon to. Defaults to 'webp'.
size: :class:`int`
The size of the image to display.
Raises
------
InvalidArgument
Bad image format passed to ``format`` or invalid ``size``.
Returns
--------
:class:`Asset`
The resulting CDN asset.
"""
return Asset._from_icon(self._state, self, 'channel', format=format, size=size)
@property
def created_at(self):
""":class:`datetime.datetime`: Returns the channel's creation time in UTC."""
return utils.snowflake_time(self.id)
def permissions_for(self, user):
"""Handles permission resolution for a :class:`User`.
This function is there for compatibility with other channel types.
Actual direct messages do not really have the concept of permissions.
This returns all the Text related permissions set to ``True`` except:
- :attr:`~Permissions.send_tts_messages`: You cannot send TTS messages in a DM.
        - :attr:`~Permissions.manage_messages`: You cannot delete others' messages in a DM.
This also checks the kick_members permission if the user is the owner.
Parameters
-----------
user: :class:`User`
The user to check permissions for.
Returns
--------
:class:`Permissions`
The resolved permissions for the user.
"""
base = Permissions.text()
base.send_tts_messages = False
base.manage_messages = False
base.mention_everyone = True
if user.id == self.owner.id:
base.kick_members = True
return base
@utils.deprecated()
async def add_recipients(self, *recipients):
r"""|coro|
Adds recipients to this group.
A group can only have a maximum of 10 members.
Attempting to add more ends up in an exception. To
add a recipient to the group, you must have a relationship
with the user of type :attr:`RelationshipType.friend`.
.. deprecated:: 1.7
Parameters
-----------
\*recipients: :class:`User`
An argument list of users to add to this group.
Raises
-------
HTTPException
Adding a recipient to this group failed.
"""
# TODO: wait for the corresponding WS event
req = self._state.http.add_group_recipient
for recipient in recipients:
await req(self.id, recipient.id)
@utils.deprecated()
async def remove_recipients(self, *recipients):
r"""|coro|
Removes recipients from this group.
.. deprecated:: 1.7
Parameters
-----------
\*recipients: :class:`User`
An argument list of users to remove from this group.
Raises
-------
HTTPException
Removing a recipient from this group failed.
"""
# TODO: wait for the corresponding WS event
req = self._state.http.remove_group_recipient
for recipient in recipients:
await req(self.id, recipient.id)
@utils.deprecated()
async def edit(self, **fields):
"""|coro|
Edits the group.
.. deprecated:: 1.7
Parameters
-----------
name: Optional[:class:`str`]
The new name to change the group to.
Could be ``None`` to remove the name.
icon: Optional[:class:`bytes`]
A :term:`py:bytes-like object` representing the new icon.
Could be ``None`` to remove the icon.
Raises
-------
HTTPException
Editing the group failed.
"""
try:
icon_bytes = fields['icon']
except KeyError:
pass
else:
if icon_bytes is not None:
fields['icon'] = utils._bytes_to_base64_data(icon_bytes)
data = await self._state.http.edit_group(self.id, **fields)
self._update_group(data)
async def leave(self):
"""|coro|
Leave the group.
If you are the only one in the group, this deletes it as well.
Raises
-------
HTTPException
Leaving the group failed.
"""
await self._state.http.leave_group(self.id)
def _channel_factory(channel_type):
value = try_enum(ChannelType, channel_type)
if value is ChannelType.text:
return TextChannel, value
elif value is ChannelType.voice:
return VoiceChannel, value
elif value is ChannelType.private:
return DMChannel, value
elif value is ChannelType.category:
return CategoryChannel, value
elif value is ChannelType.group:
return GroupChannel, value
elif value is ChannelType.news:
return TextChannel, value
elif value is ChannelType.store:
return StoreChannel, value
else:
return None, value
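# Editor's note (illustrative, not part of the original module): how the factory is
# typically consumed when turning a raw gateway payload into a channel object for
# guild channel types; the `state`, `guild` and `payload` names are placeholders.
#
#   cls, ch_type = _channel_factory(payload['type'])
#   if cls is not None:
#       channel = cls(state=state, guild=guild, data=payload)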
| 32.287535 | 122 | 0.594494 |
79441de90b7e221371f7f7b0a944999b83d7b907 | 2,222 | py | Python | pur_beurre/urls.py | NicolasFlandrois/Pure-Beurre | b64db344e3eabed8b123a6127fe0d038da53ff6e | [
"MIT"
] | null | null | null | pur_beurre/urls.py | NicolasFlandrois/Pure-Beurre | b64db344e3eabed8b123a6127fe0d038da53ff6e | [
"MIT"
] | 7 | 2020-02-12T03:27:56.000Z | 2022-03-12T00:12:09.000Z | pur_beurre/urls.py | NicolasFlandrois/PurBeurre-LinuxDeploy | de0a6677647fd6df5f4856dc6ac42275dae6aff4 | [
"MIT"
] | 2 | 2020-01-17T11:23:27.000Z | 2021-02-15T10:54:19.000Z | """pur_beurre URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
from users import views as user_views
urlpatterns = [
path('admin/', admin.site.urls),
path('register/', user_views.register, name='register'),
path('profile/', user_views.profile, name='profile'),
path('login/',
auth_views.LoginView.as_view(template_name='users/login.html'),
name='login'),
path('logout/', auth_views.LogoutView.as_view(
template_name='users/logout.html'),
name='logout'),
path('password-reset/', auth_views.PasswordResetView.as_view(
template_name='users/password_reset.html'), name='password_reset'),
path('password-reset/done/', auth_views.PasswordResetDoneView.as_view(
template_name='users/password_reset_done.html'),
name='password_reset_done'),
path('password-reset-confirm/<uidb64>/<token>/',
auth_views.PasswordResetConfirmView.as_view(
template_name='users/password_reset_confirm.html'),
name='password_reset_confirm'),
path('password-reset-complete/',
auth_views.PasswordResetCompleteView.as_view(
template_name='users/password_reset_complete.html'),
name='password_reset_complete'),
path('snacks/', include('snacks.urls')),
path('', include('home.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 35.83871 | 77 | 0.69622 |
79441fdcef6acdc19af3bdef58942f4ee3737ae0 | 1,653 | py | Python | mysite1/urls.py | mohsenbjp/mysite1 | 8a8b8b4009bf16fe391fc64c8bf9d41f7f2e32a4 | [
"MIT"
] | null | null | null | mysite1/urls.py | mohsenbjp/mysite1 | 8a8b8b4009bf16fe391fc64c8bf9d41f7f2e32a4 | [
"MIT"
] | null | null | null | mysite1/urls.py | mohsenbjp/mysite1 | 8a8b8b4009bf16fe391fc64c8bf9d41f7f2e32a4 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path,include, re_path
from .views import *
from django.contrib.sitemaps.views import sitemap
from .sitemaps import StaticViewSitemap
from blog.sitemaps import BlogSitemap
from django.contrib.auth import views as auth_views
sitemaps={'static':StaticViewSitemap,'blog':BlogSitemap,}
app_name="mysite1"
urlpatterns = [
path('admin/', admin.site.urls),
path('',index,name='index'),
path('about/',about,name='about'),
path('elements/',elements,name='elements'),
path('contact/',contact,name='contact'),
path('blog/',include('blog.urls')),
path('summernote/',include('django_summernote.urls')),
path('sitemap.xml',sitemap,{'sitemaps':sitemaps},name='django.contrib.sitemaps.views.sitemap'),
path('robots.txt',include('robots.urls')),
path('accounts/', include('allauth.urls')),
# re_path(r"^.*",maintenance),
re_path(r'^password-reset/$', auth_views.PasswordResetView.as_view(), name='password_reset'),
re_path(r'^password-reset/done/$', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'),
re_path(r'^password-reset/confirm/(?P<uidb64>[-\w]+)/(?P<token>[-\w]+)/$',
auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
re_path(r'^password-reset/complete/$',
auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 45.916667 | 111 | 0.733817 |
79442011bbb74a04f1407311593e82ee84c6d4b0 | 298 | py | Python | Task1F.py | myrsinisem/python | 0ce7a5d1f174252aaff542e955416f8203facf63 | [
"MIT"
] | null | null | null | Task1F.py | myrsinisem/python | 0ce7a5d1f174252aaff542e955416f8203facf63 | [
"MIT"
] | null | null | null | Task1F.py | myrsinisem/python | 0ce7a5d1f174252aaff542e955416f8203facf63 | [
"MIT"
] | null | null | null | from floodsystem.stationdata import build_station_list
from floodsystem.station import inconsistent_typical_range_stations
stations=build_station_list()
inconsistent_stations=inconsistent_typical_range_stations(stations)
sorted_stations=sorted(inconsistent_stations)
print(sorted_stations) | 37.25 | 68 | 0.889262 |
794420437de53d647c65dade6ec8304a0558dc36 | 8,329 | py | Python | qiskit/aqua/components/neural_networks/pytorch_discriminator.py | MartenSkogh/qiskit-aqua | 4a997e6328e06e250212ef82c9414ad16834b7c6 | [
"Apache-2.0"
] | 2 | 2020-04-09T17:27:41.000Z | 2021-01-15T04:27:07.000Z | qiskit/aqua/components/neural_networks/pytorch_discriminator.py | MartenSkogh/qiskit-aqua | 4a997e6328e06e250212ef82c9414ad16834b7c6 | [
"Apache-2.0"
] | 2 | 2020-04-21T19:59:47.000Z | 2020-04-26T21:36:47.000Z | qiskit/aqua/components/neural_networks/pytorch_discriminator.py | MartenSkogh/qiskit-aqua | 4a997e6328e06e250212ef82c9414ad16834b7c6 | [
"Apache-2.0"
] | 1 | 2021-01-17T16:29:26.000Z | 2021-01-17T16:29:26.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
PyTorch Discriminator Neural Network
"""
from typing import Dict, Any
import os
import logging
import numpy as np
from qiskit.aqua import MissingOptionalLibraryError
from .discriminative_network import DiscriminativeNetwork
logger = logging.getLogger(__name__)
try:
import torch
from torch import nn, optim
from torch.autograd.variable import Variable
_HAS_TORCH = True
except ImportError:
_HAS_TORCH = False
class PyTorchDiscriminator(DiscriminativeNetwork):
"""
Discriminator based on PyTorch
"""
def __init__(self, n_features: int = 1, n_out: int = 1) -> None:
"""
Args:
n_features: Dimension of input data vector.
n_out: Dimension of the discriminator's output vector.
Raises:
MissingOptionalLibraryError: Pytorch not installed
"""
super().__init__()
if not _HAS_TORCH:
raise MissingOptionalLibraryError(
libname='Pytorch',
name='PyTorchDiscriminator',
pip_install='pip install qiskit-aqua[torch]')
self._n_features = n_features
self._n_out = n_out
# discriminator_net: torch.nn.Module or None, Discriminator network.
# pylint: disable=import-outside-toplevel
from ._pytorch_discriminator_net import DiscriminatorNet
self._discriminator = DiscriminatorNet(self._n_features, self._n_out)
# optimizer: torch.optim.Optimizer or None, Optimizer initialized w.r.t
# discriminator network parameters.
self._optimizer = optim.Adam(self._discriminator.parameters(), lr=1e-5, amsgrad=True)
self._ret = {} # type: Dict[str, Any]
def set_seed(self, seed: int):
"""
Set seed.
Args:
seed: seed
"""
torch.manual_seed(seed)
def save_model(self, snapshot_dir: str):
"""
Save discriminator model
Args:
snapshot_dir: directory path for saving the model
"""
torch.save(self._discriminator, os.path.join(snapshot_dir, 'discriminator.pt'))
def load_model(self, load_dir: str):
"""
Load discriminator model
Args:
load_dir: file with stored pytorch discriminator model to be loaded
"""
        self._discriminator = torch.load(load_dir)
@property
def discriminator_net(self):
"""
Get discriminator
Returns:
object: discriminator object
"""
return self._discriminator
@discriminator_net.setter
def discriminator_net(self, net):
self._discriminator = net
def get_label(self, x, detach=False): # pylint: disable=arguments-differ
"""
Get data sample labels, i.e. true or fake.
Args:
x (Union(numpy.ndarray, torch.Tensor)): Discriminator input, i.e. data sample.
detach (bool): if None detach from torch tensor variable (optional)
Returns:
torch.Tensor: Discriminator output, i.e. data label
"""
# pylint: disable=not-callable, no-member
if isinstance(x, torch.Tensor):
pass
else:
x = torch.tensor(x, dtype=torch.float32)
x = Variable(x)
if detach:
return self._discriminator.forward(x).detach().numpy()
else:
return self._discriminator.forward(x)
def loss(self, x, y, weights=None):
"""
Loss function
Args:
x (torch.Tensor): Discriminator output.
y (torch.Tensor): Label of the data point
weights (torch.Tensor): Data weights.
Returns:
torch.Tensor: Loss w.r.t to the generated data points.
"""
if weights is not None:
loss_funct = nn.BCELoss(weight=weights, reduction='sum')
else:
loss_funct = nn.BCELoss()
return loss_funct(x, y)
def gradient_penalty(self, x, lambda_=5., k=0.01, c=1.):
"""
Compute gradient penalty for discriminator optimization
Args:
x (numpy.ndarray): Generated data sample.
lambda_ (float): Gradient penalty coefficient 1.
k (float): Gradient penalty coefficient 2.
c (float): Gradient penalty coefficient 3.
Returns:
torch.Tensor: Gradient penalty.
"""
# pylint: disable=not-callable, no-member
if isinstance(x, torch.Tensor):
pass
else:
x = torch.tensor(x, dtype=torch.float32)
x = Variable(x)
# pylint: disable=no-member
delta_ = torch.rand(x.size()) * c
z = Variable(x + delta_, requires_grad=True)
o_l = self.get_label(z)
# pylint: disable=no-member
d_g = torch.autograd.grad(o_l, z, grad_outputs=torch.ones(o_l.size()),
create_graph=True)[0].view(z.size(0), -1)
return lambda_ * ((d_g.norm(p=2, dim=1) - k)**2).mean()
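    # Editor's note (added explanation, not original text): the value returned above
    # is the penalty  lambda_ * mean((||grad_z D(z)||_2 - k)^2)  with z = x + delta,
    # delta ~ U(0, c); it discourages steep discriminator gradients around the data
    # samples, similar in spirit to a WGAN-GP style regulariser.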
def train(self, data, weights, penalty=True,
quantum_instance=None, shots=None) -> Dict[str, Any]:
"""
Perform one training step w.r.t. to the discriminator's parameters
Args:
data (tuple):
real_batch: torch.Tensor, Training data batch.
generated_batch: numpy array, Generated data batch.
weights (tuple): real problem, generated problem
penalty (bool): Indicate whether or not penalty function is
applied to the loss function.
            quantum_instance (QuantumInstance): Quantum Instance (deprecated)
shots (int): Number of shots for hardware or qasm execution.
Not used for classical network (only quantum ones)
Returns:
dict: with Discriminator loss (torch.Tensor) and updated parameters (array).
"""
# pylint: disable=E1101
# pylint: disable=E1102
# Reset gradients
self._optimizer.zero_grad()
real_batch = data[0]
real_prob = weights[0]
generated_batch = data[1]
generated_prob = weights[1]
real_batch = np.reshape(real_batch, (len(real_batch), 1))
real_batch = torch.tensor(real_batch, dtype=torch.float32)
real_batch = Variable(real_batch)
real_prob = np.reshape(real_prob, (len(real_prob), 1))
real_prob = torch.tensor(real_prob, dtype=torch.float32)
# Train on Real Data
prediction_real = self.get_label(real_batch)
# Calculate error and back propagate
error_real = self.loss(prediction_real, torch.ones(len(prediction_real), 1), real_prob)
error_real.backward()
# Train on Generated Data
generated_batch = np.reshape(generated_batch, (len(generated_batch), self._n_features))
generated_prob = np.reshape(generated_prob, (len(generated_prob), 1))
generated_prob = torch.tensor(generated_prob, dtype=torch.float32)
prediction_fake = self.get_label(generated_batch)
# Calculate error and back propagate
error_fake = self.loss(prediction_fake, torch.zeros(len(prediction_fake), 1),
generated_prob)
error_fake.backward()
if penalty:
self.gradient_penalty(real_batch).backward()
# pylint: enable=E1101
# pylint: enable=E1102
# Update weights with gradients
self._optimizer.step()
# Return error and predictions for real and fake inputs
loss_ret = 0.5 * (error_real + error_fake)
self._ret['loss'] = loss_ret.detach().numpy()
params = []
for param in self._discriminator.parameters():
params.append(param.data.detach().numpy())
self._ret['params'] = params
return self._ret
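# Editor's note (hedged usage sketch, not part of qiskit-aqua): one manual training
# step on toy data; the batches and the uniform weights are made up for illustration.
#
#   import numpy as np
#   disc = PyTorchDiscriminator(n_features=1)
#   real = np.random.normal(0.5, 0.1, size=32)
#   fake = np.random.uniform(0.0, 1.0, size=32)
#   w = np.full(32, 1.0 / 32)
#   ret = disc.train((real, fake), (w, w), penalty=True)
#   print(ret['loss'])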
| 33.316 | 95 | 0.616521 |
79442084adbdda8063cb80ec1f604cb8cebe05c3 | 5,303 | py | Python | src/dirbs/api/common/apidoc.py | nealmadhu/DIRBS-Core | 5afac86233c56d28e1c76d1291c2a1fec302be6f | [
"BSD-3-Clause-Clear"
] | null | null | null | src/dirbs/api/common/apidoc.py | nealmadhu/DIRBS-Core | 5afac86233c56d28e1c76d1291c2a1fec302be6f | [
"BSD-3-Clause-Clear"
] | null | null | null | src/dirbs/api/common/apidoc.py | nealmadhu/DIRBS-Core | 5afac86233c56d28e1c76d1291c2a1fec302be6f | [
"BSD-3-Clause-Clear"
] | 1 | 2022-02-09T10:55:13.000Z | 2022-02-09T10:55:13.000Z | """
Subclass FlaskApiSpec to add support for documenting multiple API versions.
Copyright (c) 2018 Qualcomm Technologies, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the
limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Qualcomm Technologies, Inc. nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY
THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
from apispec import APISpec
from flask import Blueprint, url_for
from flask_apispec.extension import FlaskApiSpec
from flask_apispec.apidoc import ViewConverter, ResourceConverter
class ApiDoc(FlaskApiSpec):
"""Override base FlaskApiSpec constructor."""
def __init__(self, app, *, version):
"""Constructor."""
self.title = 'DIRBS Core'
self.version = version
self.apidoc_ui_url = '/apidocs/{0}/'.format(self.version)
self.apidoc_json_url = '/apidocs-json/{0}/'.format(self.version)
self._deferred = []
self.app = app
self.view_converter = None
self.resource_converter = None
self.spec = None
self.app = app
self.init_app()
def init_app(self):
"""Override base init_app method."""
self.view_converter = ViewConverter(self.app)
self.resource_converter = ResourceConverter(self.app)
self.spec = APISpec(
title=self.title,
version=self.version,
info={'description': self.top_level_description},
plugins=['apispec.ext.marshmallow']
)
self.add_swagger_routes()
for deferred in self._deferred:
deferred()
def add_swagger_routes(self):
"""Override base add_swagger_routes method.
Define blueprint for the OpenAPI spec to be served.
"""
spec_blueprint = Blueprint(
'flask-apispec-{0}'.format(self.version),
FlaskApiSpec.__module__,
static_folder='./static',
template_folder='./templates',
static_url_path='/flask-apispec/static',
)
@spec_blueprint.context_processor
def override_url_for():
return dict(url_for=custom_url_for)
def custom_url_for(endpoint, **values):
endpoint = endpoint.replace('flask-apispec', 'flask-apispec-{0}'.format(self.version))
return url_for(endpoint, **values)
spec_blueprint.add_url_rule(self.apidoc_json_url, 'swagger-json', self.swagger_json)
spec_blueprint.add_url_rule(self.apidoc_ui_url, 'swagger-ui', self.swagger_ui)
self.app.register_blueprint(spec_blueprint)
@property
def top_level_description(self):
"""Generate text for top level API document description."""
description = 'The document lists the APIs exposed by DIRBS Core system. ' \
'The APIs provide programmatic access to read data from DIRBS Core. ' \
'This documentation was built using Swagger UI. ' \
'Swagger UI allows users to visualize and interact with the API\'s resources ' \
'without having any of the implementation logic in place. ' \
'\n' \
'## MIME Types ' \
'\n' \
'The Core API supports [RFC 6838](https://tools.ietf.org/html/rfc6838) ' \
'compliant media types:' \
'\n\t * application/json' \
'\n' \
'## HTML Status Codes and Error Handling ' \
'\n' \
'The Core API will attempt to send the appropriate HTML status codes. ' \
'On error, the request response will contain details about the error cause when possible.' \
'\n\n' \
'Copyright \xA9 2018 Qualcomm Technologies, Inc. All rights reserved.'
return description
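# Editor's note (illustrative, not from the original file): one documentation
# instance is registered per API version; `flask_app` and the version string are
# assumptions.
#
#   api_doc_v1 = ApiDoc(flask_app, version='v1.0')
#   # Swagger UI is then served at /apidocs/v1.0/ and the spec at /apidocs-json/v1.0/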
| 44.191667 | 118 | 0.659815 |
7944215484f01c0cc7e88804d849097b4e5338ed | 542 | py | Python | ctapipe/instrument/__init__.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | 53 | 2015-06-23T15:24:20.000Z | 2021-09-23T22:30:58.000Z | ctapipe/instrument/__init__.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | 1,537 | 2015-06-24T11:27:16.000Z | 2022-03-31T16:17:08.000Z | ctapipe/instrument/__init__.py | chaimain/ctapipe | ff80cff2daaf56e1d05ea6501c68fd83a9cf79d5 | [
"BSD-3-Clause"
] | 275 | 2015-07-09T14:09:28.000Z | 2022-03-17T22:25:51.000Z | from .camera import CameraDescription, CameraGeometry, CameraReadout, PixelShape
from .atmosphere import get_atmosphere_profile_functions
from .telescope import TelescopeDescription
from .optics import OpticsDescription
from .subarray import SubarrayDescription
from .guess import guess_telescope
__all__ = [
"CameraDescription",
"CameraGeometry",
"CameraReadout",
"get_atmosphere_profile_functions",
"TelescopeDescription",
"OpticsDescription",
"SubarrayDescription",
"guess_telescope",
"PixelShape",
]
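# Editor's note (usage sketch, not part of the original module): the names
# re-exported above are normally consumed as, for example,
#
#   from ctapipe.instrument import SubarrayDescription, guess_telescope
#
# the constructor arguments of these classes depend on the rest of ctapipe and are
# not shown here.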
| 27.1 | 80 | 0.784133 |
7944220a2dc51977c32494a25b813c0b851840b2 | 2,772 | py | Python | web_console_v2/api/fedlearner_webconsole/rpc/client.py | zhenv5/fedlearner | a8ff0eaef48e174d432a40d23d12c1f57e842ebd | [
"Apache-2.0"
] | null | null | null | web_console_v2/api/fedlearner_webconsole/rpc/client.py | zhenv5/fedlearner | a8ff0eaef48e174d432a40d23d12c1f57e842ebd | [
"Apache-2.0"
] | null | null | null | web_console_v2/api/fedlearner_webconsole/rpc/client.py | zhenv5/fedlearner | a8ff0eaef48e174d432a40d23d12c1f57e842ebd | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=broad-except
import grpc
from fedlearner_webconsole.proto import (
service_pb2, service_pb2_grpc, common_pb2
)
from fedlearner_webconsole.project.models import Project
def _build_channel(url, authority):
"""A helper function to build gRPC channel for easy testing."""
return grpc.insecure_channel(
target=url,
# options defined at
# https://github.com/grpc/grpc/blob/master/include/grpc/impl/codegen/grpc_types.h
        options=[
            ('grpc.default_authority', authority),
        ])
class RpcClient(object):
def __init__(self, project_name, receiver_name):
project = Project.query.filter_by(
name=project_name).first()
assert project is not None, \
'project {} not found'.format(project_name)
self._project = project.get_config()
assert receiver_name in self._project.participants, \
'receiver {} not found'.format(receiver_name)
self._receiver = self._project.participants[receiver_name]
self._client = service_pb2_grpc.WebConsoleV2ServiceStub(_build_channel(
self._receiver.grpc_spec.url,
self._receiver.grpc_spec.authority
))
def _get_metadata(self):
metadata = []
for key, value in self._receiver.grpc_spec.extra_headers.items():
metadata.append((key, value))
# metadata is a tuple of tuples
return tuple(metadata)
def check_connection(self):
msg = service_pb2.CheckConnectionRequest(
auth_info=service_pb2.ProjAuthInfo(
project_name=self._project.project_name,
sender_name=self._project.self_name,
receiver_name=self._receiver.name,
auth_token=self._receiver.sender_auth_token))
try:
response = self._client.CheckConnection(
request=msg, metadata=self._get_metadata())
return response.status
except Exception as e:
return common_pb2.Status(
code=common_pb2.STATUS_UNKNOWN_ERROR,
msg=repr(e))
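# Editor's note (hedged example, not part of the original service): probing a peer's
# connectivity from application code; the project and participant names are
# placeholders.
#
#   client = RpcClient('my-project', 'partner-a')
#   status = client.check_connection()
#   print(status.code, status.msg)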
| 37.459459 | 93 | 0.669192 |
794425cdbeb8519eb234565a8d18ef5955bec7ba | 2,549 | py | Python | opsrest/handlers/login.py | chinhtle/ops-restd | ab3599a0b8b4df99c35b3f99de6948b2c41630d5 | [
"Apache-2.0"
] | null | null | null | opsrest/handlers/login.py | chinhtle/ops-restd | ab3599a0b8b4df99c35b3f99de6948b2c41630d5 | [
"Apache-2.0"
] | null | null | null | opsrest/handlers/login.py | chinhtle/ops-restd | ab3599a0b8b4df99c35b3f99de6948b2c41630d5 | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2015-2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tornado import gen
from tornado.log import app_log
import re
import httplib
import userauth
from opsrest.handlers import base
from opsrest.exceptions import APIException, AuthenticationFailed
from opsrest.constants import USERNAME_KEY
from opsrest.utils.userutils import check_user_login_authorization
from opsrest.utils.utils import redirect_http_to_https
class LoginHandler(base.BaseHandler):
# pass the application reference to the handlers
def initialize(self, ref_object):
self.error_message = None
# Overwrite BaseHandler's prepare, as LoginHandler does not
# require authentication check prior to other operations
def prepare(self):
try:
redirect_http_to_https(self)
except Exception as e:
self.on_exception(e)
@gen.coroutine
def get(self):
try:
app_log.debug("Executing Login GET...")
is_authenticated = userauth.is_user_authenticated(self)
if not is_authenticated:
raise AuthenticationFailed
else:
self.set_status(httplib.OK)
except APIException as e:
self.on_exception(e)
except Exception as e:
self.on_exception(e)
self.finish()
@gen.coroutine
def post(self):
try:
app_log.debug("Executing Login POST...")
username = self.get_argument(USERNAME_KEY)
check_user_login_authorization(username)
login_success = userauth.handle_user_login(self)
if not login_success:
raise AuthenticationFailed('invalid username/password '
'combination')
else:
self.set_status(httplib.OK)
except APIException as e:
self.on_exception(e)
except Exception as e:
self.on_exception(e)
self.finish()
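# Hedged usage sketch (not part of the original module): one way this handler
# might be mounted in a Tornado application; the '/login' route, port and
# ref_object value are assumptions, and base.BaseHandler may expect a richer
# ref_object in practice.
if __name__ == '__main__':
    import tornado.ioloop
    import tornado.web
    app = tornado.web.Application(
        [(r'/login', LoginHandler, dict(ref_object=None))])
    app.listen(8091)
    tornado.ioloop.IOLoop.current().start()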
| 29.639535 | 76 | 0.667713 |
7944266b51bda6fcf8d4ad047a250d21fe76b746 | 376 | py | Python | lib/util.py | underc0de/reaper | ec74c8de08a57edfd4df745aad9c8c216c57f9d8 | [
"MIT"
] | null | null | null | lib/util.py | underc0de/reaper | ec74c8de08a57edfd4df745aad9c8c216c57f9d8 | [
"MIT"
] | null | null | null | lib/util.py | underc0de/reaper | ec74c8de08a57edfd4df745aad9c8c216c57f9d8 | [
"MIT"
] | null | null | null | from time import strftime, localtime, time
def get_time():
"""
Return the time
"""
return strftime("%H:%M:%S", localtime(time()))
def read_file(file_name):
"""
Return the content of a File
"""
with open(file_name, 'rb') as f:
return f.read()
def write_file(file_name, content):
    """
    Write content to a File
    """
    # NOTE: the ``content`` parameter and the write call are assumptions; the
    # original snippet opened the file and returned the handle without writing.
    with open(file_name, 'w+b') as f:
        return f.write(content) | 16.347826 | 50 | 0.62766 |
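# Hedged usage sketch (not part of the original module): exercises the helpers
# above; the file name, payload and the write_file signature are assumptions.
if __name__ == '__main__':
    print(get_time())
    write_file('example.bin', b'hello')
    print(read_file('example.bin'))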
79442671dd7f8fbef8d98188d73e50cba0635592 | 34,188 | py | Python | pandas/io/excel/_base.py | Alvinwuzw/pandas | 2d4372111e1a79058faa5ddd6bc0a9a73648a33a | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/io/excel/_base.py | Alvinwuzw/pandas | 2d4372111e1a79058faa5ddd6bc0a9a73648a33a | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/io/excel/_base.py | Alvinwuzw/pandas | 2d4372111e1a79058faa5ddd6bc0a9a73648a33a | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | import abc
import datetime
from io import BufferedIOBase, BytesIO, RawIOBase
import os
from textwrap import fill
from typing import Any, Mapping, Union
from pandas._config import config
from pandas._libs.parsers import STR_NA_VALUES
from pandas._typing import StorageOptions
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender, deprecate_nonkeyword_arguments
from pandas.core.dtypes.common import is_bool, is_float, is_integer, is_list_like
from pandas.core.frame import DataFrame
from pandas.io.common import (
IOArgs,
get_filepath_or_buffer,
is_url,
stringify_path,
urlopen,
validate_header_arg,
)
from pandas.io.excel._util import (
fill_mi_header,
get_default_writer,
get_writer,
maybe_convert_usecols,
pop_header_name,
)
from pandas.io.parsers import TextParser
_read_excel_doc = (
"""
Read an Excel file into a pandas DataFrame.
Supports `xls`, `xlsx`, `xlsm`, `xlsb`, `odf`, `ods` and `odt` file extensions
read from a local filesystem or URL. Supports an option to read
a single sheet or a list of sheets.
Parameters
----------
io : str, bytes, ExcelFile, xlrd.Book, path object, or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be: ``file://localhost/path/to/table.xlsx``.
If you want to pass in a path object, pandas accepts any ``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable default None
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
.. versionadded:: 0.24.0
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
Returns a subset of the columns according to behavior above.
.. versionadded:: 0.24.0
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Supported engines: "xlrd", "openpyxl", "odf", "pyxlsb", default "xlrd".
Engine compatibility :
- "xlrd" supports most old/new Excel file formats.
- "openpyxl" supports newer Excel file formats.
- "odf" supports OpenDocument file formats (.odf, .ods, .odt).
- "pyxlsb" supports Binary Excel files.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like, int, or callable, optional
Line numbers to skip (0-indexed) or number of lines to skip (int) at the
start of the file. If callable, the callable function will be evaluated
against the row indices, returning True if the row should be skipped and
False otherwise. An example of a valid callable argument would be ``lambda
x: x in [0, 2]``.
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '"""
+ fill("', '".join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ")
+ """'.
keep_default_na : bool, default True
Whether or not to include the default NaN values when parsing the data.
Depending on whether `na_values` is passed in, the behavior is as follows:
* If `keep_default_na` is True, and `na_values` are specified, `na_values`
is appended to the default NaN values used for parsing.
* If `keep_default_na` is True, and `na_values` are not specified, only
the default NaN values are used for parsing.
* If `keep_default_na` is False, and `na_values` are specified, only
the NaN values specified `na_values` are used for parsing.
* If `keep_default_na` is False, and `na_values` are not specified, no
strings will be parsed as NaN.
Note that if `na_filter` is passed in as False, the `keep_default_na` and
`na_values` parameters will be ignored.
na_filter : bool, default True
Detect missing value markers (empty strings and the value of na_values). In
data without any NAs, passing na_filter=False can improve the performance
of reading a large file.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
* dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
    index will be returned unaltered as an object data type. If you don't want to
parse some cells as date just change their type in Excel to "Text".
For non-standard datetime parsing, use ``pd.to_datetime`` after ``pd.read_excel``.
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. Pandas will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
storage_options : dict, optional
Extra options that make sense for a particular storage connection, e.g.
host, port, username, password, etc., if using a URL that will
be parsed by ``fsspec``, e.g., starting "s3://", "gcs://". An error
will be raised if providing this argument with a local path or
a file-like buffer. See the fsspec and backend storage implementation
docs for the set of allowed keys and values.
.. versionadded:: 1.2.0
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_fwf : Read a table of fixed-width formatted lines into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> pd.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 NaN 1
1 NaN 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> pd.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
)
@deprecate_nonkeyword_arguments(allowed_args=2, version="2.0")
@Appender(_read_excel_doc)
def read_excel(
io,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
engine=None,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
keep_default_na=True,
na_filter=True,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
storage_options: StorageOptions = None,
):
if not isinstance(io, ExcelFile):
io = ExcelFile(io, storage_options=storage_options, engine=engine)
elif engine and engine != io.engine:
raise ValueError(
"Engine should not be specified when passing "
"an ExcelFile - ExcelFile already has the engine set"
)
return io.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
dtype=dtype,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
na_filter=na_filter,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
)
class BaseExcelReader(metaclass=abc.ABCMeta):
def __init__(self, filepath_or_buffer, storage_options: StorageOptions = None):
self.ioargs = IOArgs(
filepath_or_buffer=filepath_or_buffer,
encoding=None,
mode=None,
compression={"method": None},
)
# If filepath_or_buffer is a url, load the data into a BytesIO
if is_url(filepath_or_buffer):
self.ioargs = IOArgs(
filepath_or_buffer=BytesIO(urlopen(filepath_or_buffer).read()),
should_close=True,
encoding=None,
mode=None,
compression={"method": None},
)
elif not isinstance(filepath_or_buffer, (ExcelFile, self._workbook_class)):
self.ioargs = get_filepath_or_buffer(
filepath_or_buffer, storage_options=storage_options
)
if isinstance(self.ioargs.filepath_or_buffer, self._workbook_class):
self.book = self.ioargs.filepath_or_buffer
elif hasattr(self.ioargs.filepath_or_buffer, "read"):
# N.B. xlrd.Book has a read attribute too
assert not isinstance(self.ioargs.filepath_or_buffer, str)
self.ioargs.filepath_or_buffer.seek(0)
self.book = self.load_workbook(self.ioargs.filepath_or_buffer)
elif isinstance(self.ioargs.filepath_or_buffer, str):
self.book = self.load_workbook(self.ioargs.filepath_or_buffer)
elif isinstance(self.ioargs.filepath_or_buffer, bytes):
self.book = self.load_workbook(BytesIO(self.ioargs.filepath_or_buffer))
else:
raise ValueError(
"Must explicitly set engine if not passing in buffer or path for io."
)
@property
@abc.abstractmethod
def _workbook_class(self):
pass
@abc.abstractmethod
def load_workbook(self, filepath_or_buffer):
pass
def close(self):
self.ioargs.close()
@property
@abc.abstractmethod
def sheet_names(self):
pass
@abc.abstractmethod
def get_sheet_by_name(self, name):
pass
@abc.abstractmethod
def get_sheet_by_index(self, index):
pass
@abc.abstractmethod
def get_sheet_data(self, sheet, convert_float):
pass
def parse(
self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
dtype=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
verbose=False,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds,
):
validate_header_arg(header)
ret_dict = False
# Keep sheetname to maintain backwards compatibility.
if isinstance(sheet_name, list):
sheets = sheet_name
ret_dict = True
elif sheet_name is None:
sheets = self.sheet_names
ret_dict = True
else:
sheets = [sheet_name]
# handle same-type duplicates.
sheets = list(dict.fromkeys(sheets).keys())
output = {}
header_input = header
for asheetname in sheets:
if verbose:
print(f"Reading sheet {asheetname}")
if isinstance(asheetname, str):
sheet = self.get_sheet_by_name(asheetname)
else: # assume an integer if not a string
sheet = self.get_sheet_by_index(asheetname)
data = self.get_sheet_data(sheet, convert_float)
usecols = maybe_convert_usecols(usecols)
if not data:
output[asheetname] = DataFrame()
continue
if isinstance(header_input, dict):
header = header_input[asheetname]
elif is_list_like(header) and len(header) == 1:
header = header[0]
# forward fill and pull out names for MultiIndex column
header_names = None
if header is not None and is_list_like(header):
header_names = []
control_row = [True] * len(data[0])
for row in header:
if is_integer(skiprows):
row += skiprows
# the line we changed
if row:
data[row], control_row = fill_mi_header(data[row], control_row)
if index_col is not None:
header_name, _ = pop_header_name(data[row], index_col)
header_names.append(header_name)
if is_list_like(index_col):
# Forward fill values for MultiIndex index.
if header is None:
offset = 0
elif not is_list_like(header):
offset = 1 + header
else:
offset = 1 + max(header)
# Check if we have an empty dataset
# before trying to collect data.
if offset < len(data):
for col in index_col:
last = data[offset][col]
for row in range(offset + 1, len(data)):
if data[row][col] == "" or data[row][col] is None:
data[row][col] = last
else:
last = data[row][col]
has_index_names = is_list_like(header) and len(header) > 1
# GH 12292 : error when read one empty column from excel file
try:
parser = TextParser(
data,
names=names,
header=header,
index_col=index_col,
has_index_names=has_index_names,
squeeze=squeeze,
dtype=dtype,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
usecols=usecols,
mangle_dupe_cols=mangle_dupe_cols,
**kwds,
)
output[asheetname] = parser.read(nrows=nrows)
if not squeeze or isinstance(output[asheetname], DataFrame):
if header_names:
output[asheetname].columns = output[
asheetname
].columns.set_names(header_names)
except EmptyDataError:
# No Data, return an empty DataFrame
output[asheetname] = DataFrame()
if ret_dict:
return output
else:
return output[asheetname]
class ExcelWriter(metaclass=abc.ABCMeta):
"""
Class for writing DataFrame objects into excel sheets.
Default is to use xlwt for xls, openpyxl for xlsx, odf for ods.
See DataFrame.to_excel for typical usage.
Parameters
----------
path : str or typing.BinaryIO
Path to xls or xlsx or ods file.
engine : str (optional)
Engine to use for writing. If None, defaults to
``io.excel.<extension>.writer``. NOTE: can only be passed as a keyword
argument.
date_format : str, default None
Format string for dates written into Excel files (e.g. 'YYYY-MM-DD').
datetime_format : str, default None
Format string for datetime objects written into Excel files.
(e.g. 'YYYY-MM-DD HH:MM:SS').
mode : {'w', 'a'}, default 'w'
File mode to use (write or append).
.. versionadded:: 0.24.0
Attributes
----------
None
Methods
-------
None
Notes
-----
None of the methods and properties are considered public.
For compatibility with CSV writers, ExcelWriter serializes lists
and dicts to strings before writing.
Examples
--------
Default usage:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df.to_excel(writer)
To write to separate sheets in a single file:
>>> with ExcelWriter('path_to_file.xlsx') as writer:
... df1.to_excel(writer, sheet_name='Sheet1')
... df2.to_excel(writer, sheet_name='Sheet2')
You can set the date format or datetime format:
>>> with ExcelWriter('path_to_file.xlsx',
... date_format='YYYY-MM-DD',
... datetime_format='YYYY-MM-DD HH:MM:SS') as writer:
... df.to_excel(writer)
You can also append to an existing Excel file:
>>> with ExcelWriter('path_to_file.xlsx', mode='a') as writer:
... df.to_excel(writer, sheet_name='Sheet3')
You can store Excel file in RAM:
>>> import io
>>> buffer = io.BytesIO()
>>> with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer)
You can pack Excel file into zip archive:
>>> import zipfile
>>> with zipfile.ZipFile('path_to_file.zip', 'w') as zf:
... with zf.open('filename.xlsx', 'w') as buffer:
... with pd.ExcelWriter(buffer) as writer:
... df.to_excel(writer)
"""
# Defining an ExcelWriter implementation (see abstract methods for more...)
# - Mandatory
# - ``write_cells(self, cells, sheet_name=None, startrow=0, startcol=0)``
# --> called to write additional DataFrames to disk
# - ``supported_extensions`` (tuple of supported extensions), used to
# check that engine supports the given extension.
# - ``engine`` - string that gives the engine name. Necessary to
# instantiate class directly and bypass ``ExcelWriterMeta`` engine
# lookup.
# - ``save(self)`` --> called to save file to disk
# - Mostly mandatory (i.e. should at least exist)
# - book, cur_sheet, path
# - Optional:
# - ``__init__(self, path, engine=None, **kwargs)`` --> always called
# with path as first argument.
# You also need to register the class with ``register_writer()``.
# Technically, ExcelWriter implementations don't need to subclass
# ExcelWriter.
def __new__(cls, path, engine=None, **kwargs):
# only switch class if generic(ExcelWriter)
if cls is ExcelWriter:
if engine is None or (isinstance(engine, str) and engine == "auto"):
if isinstance(path, str):
ext = os.path.splitext(path)[-1][1:]
else:
ext = "xlsx"
try:
engine = config.get_option(f"io.excel.{ext}.writer")
if engine == "auto":
engine = get_default_writer(ext)
except KeyError as err:
raise ValueError(f"No engine for filetype: '{ext}'") from err
cls = get_writer(engine)
return object.__new__(cls)
# declare external properties you can count on
curr_sheet = None
path = None
@property
@abc.abstractmethod
def supported_extensions(self):
"""Extensions that writer engine supports."""
pass
@property
@abc.abstractmethod
def engine(self):
"""Name of engine."""
pass
@abc.abstractmethod
def write_cells(
self, cells, sheet_name=None, startrow=0, startcol=0, freeze_panes=None
):
"""
        Write given formatted cells into an Excel sheet
Parameters
----------
cells : generator
cell of formatted data to save to Excel sheet
sheet_name : str, default None
Name of Excel sheet, if None, then use self.cur_sheet
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
freeze_panes: int tuple of length 2
contains the bottom-most row and right-most column to freeze
"""
pass
@abc.abstractmethod
def save(self):
"""
Save workbook to disk.
"""
pass
def __init__(
self,
path,
engine=None,
date_format=None,
datetime_format=None,
mode="w",
**engine_kwargs,
):
# validate that this engine can handle the extension
if isinstance(path, str):
ext = os.path.splitext(path)[-1]
self.check_extension(ext)
self.path = path
self.sheets = {}
self.cur_sheet = None
if date_format is None:
self.date_format = "YYYY-MM-DD"
else:
self.date_format = date_format
if datetime_format is None:
self.datetime_format = "YYYY-MM-DD HH:MM:SS"
else:
self.datetime_format = datetime_format
self.mode = mode
def __fspath__(self):
# pandas\io\excel\_base.py:744: error: Argument 1 to "stringify_path"
# has incompatible type "Optional[Any]"; expected "Union[str, Path,
# IO[Any], IOBase]" [arg-type]
return stringify_path(self.path) # type: ignore[arg-type]
def _get_sheet_name(self, sheet_name):
if sheet_name is None:
sheet_name = self.cur_sheet
if sheet_name is None: # pragma: no cover
raise ValueError("Must pass explicit sheet_name or set cur_sheet property")
return sheet_name
def _value_with_fmt(self, val):
"""
Convert numpy types to Python types for the Excel writers.
Parameters
----------
val : object
Value to be written into cells
Returns
-------
Tuple with the first element being the converted value and the second
being an optional format
"""
fmt = None
if is_integer(val):
val = int(val)
elif is_float(val):
val = float(val)
elif is_bool(val):
val = bool(val)
elif isinstance(val, datetime.datetime):
fmt = self.datetime_format
elif isinstance(val, datetime.date):
fmt = self.date_format
elif isinstance(val, datetime.timedelta):
val = val.total_seconds() / float(86400)
fmt = "0"
else:
val = str(val)
return val, fmt
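    # Worked example (illustrative): on an ExcelWriter instance ``writer``,
    # writer._value_with_fmt(datetime.timedelta(hours=12)) returns (0.5, "0"),
    # i.e. half a day rendered with the "0" number format.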
@classmethod
def check_extension(cls, ext: str):
"""
        checks the path's extension against the Writer's supported
        extensions. If it isn't supported, raises ValueError.
"""
if ext.startswith("."):
ext = ext[1:]
# error: "Callable[[ExcelWriter], Any]" has no attribute "__iter__"
# (not iterable) [attr-defined]
if not any(
ext in extension
for extension in cls.supported_extensions # type: ignore[attr-defined]
):
raise ValueError(f"Invalid extension for engine '{cls.engine}': '{ext}'")
else:
return True
# Allow use as a contextmanager
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""synonym for save, to make it more file-like"""
return self.save()
def _is_ods_stream(stream: Union[BufferedIOBase, RawIOBase]) -> bool:
"""
Check if the stream is an OpenDocument Spreadsheet (.ods) file
It uses magic values inside the stream
Parameters
----------
stream : Union[BufferedIOBase, RawIOBase]
IO stream with data which might be an ODS file
Returns
-------
is_ods : bool
Boolean indication that this is indeed an ODS file or not
"""
stream.seek(0)
is_ods = False
if stream.read(4) == b"PK\003\004":
stream.seek(30)
is_ods = (
stream.read(54) == b"mimetype"
b"application/vnd.oasis.opendocument.spreadsheet"
)
stream.seek(0)
return is_ods
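# Hedged usage sketch (not part of pandas): probing an in-memory buffer; the
# bytes below only mimic a zip local-file header, so the check returns False
# because the ODS mimetype marker is absent.
# buf = BytesIO(b"PK\x03\x04" + b"\x00" * 80)
# _is_ods_stream(buf)  # -> False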
class ExcelFile:
"""
Class for parsing tabular excel sheets into DataFrame objects.
Uses xlrd engine by default. See read_excel for more documentation
Parameters
----------
path_or_buffer : str, path object (pathlib.Path or py._path.local.LocalPath),
        a file-like object, xlrd workbook or openpyxl workbook.
If a string or path object, expected to be a path to a
.xls, .xlsx, .xlsb, .xlsm, .odf, .ods, or .odt file.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Supported engines: ``xlrd``, ``openpyxl``, ``odf``, ``pyxlsb``,
default ``xlrd``.
Engine compatibility :
- ``xlrd`` supports most old/new Excel file formats.
- ``openpyxl`` supports newer Excel file formats.
- ``odf`` supports OpenDocument file formats (.odf, .ods, .odt).
- ``pyxlsb`` supports Binary Excel files.
"""
from pandas.io.excel._odfreader import ODFReader
from pandas.io.excel._openpyxl import OpenpyxlReader
from pandas.io.excel._pyxlsb import PyxlsbReader
from pandas.io.excel._xlrd import XlrdReader
_engines: Mapping[str, Any] = {
"xlrd": XlrdReader,
"openpyxl": OpenpyxlReader,
"odf": ODFReader,
"pyxlsb": PyxlsbReader,
}
def __init__(
self, path_or_buffer, engine=None, storage_options: StorageOptions = None
):
if engine is None:
engine = "xlrd"
if isinstance(path_or_buffer, (BufferedIOBase, RawIOBase)):
if _is_ods_stream(path_or_buffer):
engine = "odf"
else:
ext = os.path.splitext(str(path_or_buffer))[-1]
if ext == ".ods":
engine = "odf"
if engine not in self._engines:
raise ValueError(f"Unknown engine: {engine}")
self.engine = engine
self.storage_options = storage_options
# Could be a str, ExcelFile, Book, etc.
self.io = path_or_buffer
# Always a string
self._io = stringify_path(path_or_buffer)
self._reader = self._engines[engine](self._io, storage_options=storage_options)
def __fspath__(self):
return self._io
def parse(
self,
sheet_name=0,
header=0,
names=None,
index_col=None,
usecols=None,
squeeze=False,
converters=None,
true_values=None,
false_values=None,
skiprows=None,
nrows=None,
na_values=None,
parse_dates=False,
date_parser=None,
thousands=None,
comment=None,
skipfooter=0,
convert_float=True,
mangle_dupe_cols=True,
**kwds,
):
"""
Parse specified sheet(s) into a DataFrame.
Equivalent to read_excel(ExcelFile, ...) See the read_excel
docstring for more info on accepted parameters.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file.
"""
return self._reader.parse(
sheet_name=sheet_name,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=squeeze,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds,
)
@property
def book(self):
return self._reader.book
@property
def sheet_names(self):
return self._reader.sheet_names
def close(self):
"""close io if necessary"""
self._reader.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __del__(self):
# Ensure we don't leak file descriptors, but put in try/except in case
# attributes are already deleted
try:
self.close()
except AttributeError:
pass
| 33.816024 | 87 | 0.614163 |
79442688528877f19538302cd834c0bc231e8349 | 959 | py | Python | leetcode/two_numbers_sum.py | clnFind/DayDayAlgorithm | 5644a666a3d84547d8cf00031fc2e30273cc0e9a | [
"Apache-2.0"
] | null | null | null | leetcode/two_numbers_sum.py | clnFind/DayDayAlgorithm | 5644a666a3d84547d8cf00031fc2e30273cc0e9a | [
"Apache-2.0"
] | null | null | null | leetcode/two_numbers_sum.py | clnFind/DayDayAlgorithm | 5644a666a3d84547d8cf00031fc2e30273cc0e9a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import copy
class Solution(object):
"""
    Given nums = [2, 7, 11, 15] and target = 9,
    because nums[0] + nums[1] = 2 + 7 = 9,
    return [0, 1].
"""
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
for i in range(len(nums)):
nums_copy = copy.copy(nums)
nums_copy.remove(nums[i])
for j in nums_copy:
if nums[i] + j == target:
return i, nums.index(j)
return None
    def two_sum(self, nums, target):
        for i, num in enumerate(nums):
            val = target - num
            # guard against pairing an element with itself when val == num
            if val in nums and nums.index(val) != i:
                return i, nums.index(val)
        return None
if __name__ == '__main__':
l = [3, 4, 10, 2, 7]
target = 9
result = Solution().twoSum(l, target)
print(result)
result1 = Solution().two_sum(l, target)
print(result1)
| 21.795455 | 55 | 0.486966 |
794427628dbd2c3568043014d78159e949d60936 | 1,813 | py | Python | src/azure-cli/azure/cli/command_modules/acr/_client_factory.py | shinilm/azure-cli | 7c5f44151010b4b64d822f8cbe3e725f3525a448 | [
"MIT"
] | 1 | 2019-06-21T05:07:38.000Z | 2019-06-21T05:07:38.000Z | src/azure-cli/azure/cli/command_modules/acr/_client_factory.py | shinilm/azure-cli | 7c5f44151010b4b64d822f8cbe3e725f3525a448 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acr/_client_factory.py | shinilm/azure-cli | 7c5f44151010b4b64d822f8cbe3e725f3525a448 | [
"MIT"
] | 1 | 2019-06-21T05:08:09.000Z | 2019-06-21T05:08:09.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands.client_factory import get_mgmt_service_client
VERSION_2017_10_GA = "2017-10-01"
def get_arm_service_client(cli_ctx):
"""Returns the client for managing ARM resources. """
from azure.mgmt.resource import ResourceManagementClient
return get_mgmt_service_client(cli_ctx, ResourceManagementClient)
def get_storage_service_client(cli_ctx):
"""Returns the client for managing storage accounts. """
from azure.cli.core.profiles import ResourceType
return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
def get_acr_service_client(cli_ctx, api_version=None):
"""Returns the client for managing container registries. """
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
return get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient, api_version=api_version)
def cf_acr_registries(cli_ctx, *_):
return get_acr_service_client(cli_ctx, VERSION_2017_10_GA).registries
def cf_acr_registries_tasks(cli_ctx, *_):
return get_acr_service_client(cli_ctx).registries
def cf_acr_replications(cli_ctx, *_):
return get_acr_service_client(cli_ctx, VERSION_2017_10_GA).replications
def cf_acr_webhooks(cli_ctx, *_):
return get_acr_service_client(cli_ctx, VERSION_2017_10_GA).webhooks
def cf_acr_tasks(cli_ctx, *_):
return get_acr_service_client(cli_ctx).tasks
def cf_acr_runs(cli_ctx, *_):
return get_acr_service_client(cli_ctx).runs
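# Hedged usage sketch (not part of the original module; names are assumptions):
# a commands loader would normally pass these factories as ``client_factory``
# when registering command groups. Calling one directly requires an
# authenticated CLI context, e.g.:
# from azure.cli.core import get_default_cli
# cli_ctx = get_default_cli()
# registries_client = cf_acr_registries(cli_ctx)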
| 35.54902 | 103 | 0.720905 |
794427d27d892dcc7bb62e77d9eda676c3c64985 | 4,376 | py | Python | face_recognition_cam/recognition.py | andreaconti/face_recognition_cam | 2f9d95d982bb12dadbe8d933c6548244745aa2af | [
"MIT"
] | null | null | null | face_recognition_cam/recognition.py | andreaconti/face_recognition_cam | 2f9d95d982bb12dadbe8d933c6548244745aa2af | [
"MIT"
] | null | null | null | face_recognition_cam/recognition.py | andreaconti/face_recognition_cam | 2f9d95d982bb12dadbe8d933c6548244745aa2af | [
"MIT"
] | null | null | null | """
Module containing person recognition
"""
from typing import Dict, List, Tuple
import mxnet as mx # type: ignore
import numpy as np # type: ignore
from numpy import ndarray
from pkg_resources import resource_filename
from scipy.spatial.distance import cdist # type: ignore
class FaceRecognizer:
"""
Class for face embedding and recognizing
"""
def __init__(self):
# find file path
params_path = resource_filename(
"face_recognition_cam.resources.models", "mobileFaceNet-0000.params"
)
symbols_path = resource_filename(
"face_recognition_cam.resources.models", "mobileFaceNet-symbol.json"
)
# load model
ctx = mx.cpu()
sym = mx.sym.load_json(open(symbols_path, "r").read())
model = mx.gluon.nn.SymbolBlock(outputs=sym, inputs=mx.sym.var("data"))
model.load_parameters(params_path, ctx=ctx)
self._model = model
def embed_faces(self, faces: ndarray) -> ndarray:
"""
Performs face embedding given a list of faces in a ndarray.
Parameters
----------
faces: array_like
faces parameter must have [N, 112, 112, 3] shape, images must be RGB.
Returns
-------
array_like
of shape [N, 192] where each row is a face embedded
"""
if len(faces.shape) == 3:
faces = faces[None, :, :, :]
elif len(faces.shape) == 4:
_, h, w, c = faces.shape
if c != 3 or h != 112 or w != 112:
raise ValueError("expected images of shape 3x112x112")
else:
raise ValueError("shape must be 3 or 4 (a batch)")
# embed
faces = np.moveaxis(faces, -1, 1)
faces_embedded = self._model(mx.nd.array(faces))
faces_embedded = faces_embedded.asnumpy()
return faces_embedded
def generate_dataset(self, people: Dict[str, ndarray]) -> Dict[str, ndarray]:
"""
Takes as input a dictionary containing as key the name of each
person and as value a ndarray representing a batch of images of
that person and returns another dictionary 'name': embedding.
Parameters
----------
people: Dict[str, ndarray]
a dictionary containing for each name a ndarray containing images of that
person. each ndarray must be of shape [N, 112, 112, 3]
Returns
-------
Dict[str, ndarray]
where each ndarray is the embedding
"""
result: Dict[str, ndarray] = {}
for name, imgs in people.items():
result[name] = self.embed_faces(imgs)
return result
def assign_names(
self, dataset: Dict[str, ndarray], faces: ndarray, min_confidence: float = 0.5
) -> List[Tuple[str, float]]:
"""
Assign a name to each face in `faces`.
Parameters
----------
dataset: Dict[str, ndarray]
a dictionary in which each name is associated to many embeddings. It can be
generated with `generate_dataset` method.
faces: ndarray
a numpy ndarray of shape [N,112, 112, 3] where each [112, 112, 3] is a
face.
        min_confidence: float, default 0.5
            if, among the known people, the maximum confidence found is less than
            `min_confidence`, the face is labeled as 'unknown'
Returns
-------
        List[Tuple[str, float]]
            the (name, confidence) pair for each face; the name is 'unknown' if the
            maximum confidence found is less than `min_confidence`
"""
people_emb = self.embed_faces(faces)
# compute confidence matrix
confidence_matrix = np.zeros((len(dataset), people_emb.shape[0]))
names = np.empty((len(dataset),), dtype=object)
for i, (name, emb) in enumerate(dataset.items()):
names[i] = name
confidence_matrix[i, :] = np.max(
1 - cdist(emb, people_emb, metric="cosine"), axis=0
)
# find best matches
best = np.argmax(confidence_matrix, axis=0)
confidences = confidence_matrix[best, np.arange(confidence_matrix.shape[1])]
names = names[best]
names[confidences < min_confidence] = "unknown"
result = list(zip(names, confidences))
return result
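# Hedged usage sketch (not part of the original module): random arrays stand in
# for aligned RGB face crops of shape [N, 112, 112, 3]; real use would detect,
# crop and align faces first, so the printed matches here are meaningless.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    known = {
        "alice": rng.randint(0, 255, (3, 112, 112, 3)).astype(np.uint8),
        "bob": rng.randint(0, 255, (2, 112, 112, 3)).astype(np.uint8),
    }
    recognizer = FaceRecognizer()
    dataset = recognizer.generate_dataset(known)
    probe = rng.randint(0, 255, (1, 112, 112, 3)).astype(np.uint8)
    print(recognizer.assign_names(dataset, probe, min_confidence=0.5))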
| 33.151515 | 88 | 0.589122 |
794428d17f831d3b7989f3fec9411c37f1a34ae9 | 77,808 | py | Python | pstem/stem-random-walk-nin-20-74/stem-random-walk-nin-20-74_examples.py | Jeffrey-Ede/partial-STEM | 916766091b81b54cacde85a319f34a47d50bce48 | [
"MIT"
] | 10 | 2019-06-12T00:24:37.000Z | 2021-11-23T12:58:02.000Z | pstem/stem-random-walk-nin-20-74/stem-random-walk-nin-20-74_examples.py | Jeffrey-Ede/partial-STEM | 916766091b81b54cacde85a319f34a47d50bce48 | [
"MIT"
] | null | null | null | pstem/stem-random-walk-nin-20-74/stem-random-walk-nin-20-74_examples.py | Jeffrey-Ede/partial-STEM | 916766091b81b54cacde85a319f34a47d50bce48 | [
"MIT"
] | 5 | 2019-06-11T05:49:10.000Z | 2021-11-12T15:55:22.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import argparse
import numpy as np
import tensorflow as tf
import cv2
from scipy.misc import imread
from scipy import ndimage as nd
import time
import os, random
from PIL import Image
from PIL import ImageDraw
import functools
import itertools
import collections
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
from tensorflow.python.training import device_setter
from tensorflow.contrib.learn.python.learn import run_config
from tensorflow.contrib.framework.python.ops import add_arg_scope
slim = tf.contrib.slim
tf.logging.set_verbosity(tf.logging.DEBUG)
tf.set_random_seed(1234)
scale = 0 #Make scale large to speed up initial testing
gen_features0 = 32 if not scale else 1
gen_features1 = 64 if not scale else 1
gen_features2 = 64 if not scale else 1
gen_features3 = 32 if not scale else 1
nin_features1 = 128 if not scale else 1
nin_features2 = 256 if not scale else 1
nin_features3 = 512 if not scale else 1
nin_features4 = 768 if not scale else 1
features1 = 64 if not scale else 1
features2 = 128 if not scale else 1
features3 = 256 if not scale else 1
features4 = 512 if not scale else 1
features5 = features4 if not scale else 1
num_global_enhancer_blocks = 6
num_local_enhancer_blocks = 3
data_dir = "//Desktop-sa1evjv/f/ARM_scans-crops/"
modelSavePeriod = 4. #Train timestep in hours
modelSavePeriod *= 3600 #Convert to s
model_dir = "//ads.warwick.ac.uk/shared/HCSS6/Shared305/Microscopy/Jeffrey-Ede/models/stem-random-walk-nin-20-74/"
shuffle_buffer_size = 5000
num_parallel_calls = 6
num_parallel_readers = 6
prefetch_buffer_size = 12
batch_size = 1
num_gpus = 1
#batch_size = 8 #Batch size to use during training
num_epochs = 1000000 #Dataset repeats indefinitely
logDir = "C:/dump/train/"
log_file = model_dir+"log.txt"
val_log_file = model_dir+"val_log.txt"
discr_pred_file = model_dir+"discr_pred.txt"
log_every = 1 #Log every _ examples
cumProbs = np.array([]) #Indices of the distribution plus 1 will correspond to means
numMeans = 64 // batch_size
scaleMean = 4 #Each means array index increment corresponds to this increase in the mean
numDynamicGrad = 1 #Number of gradients to calculate for each possible mean when dynamically updating training
lossSmoothingBoxcarSize = 5
channels = 1 #Greyscale input image
#Sidelength of images to feed the neural network
cropsize = 512
use_mask = False #If true, supply mask to network as additional information
generator_input_size = cropsize
height_crop = width_crop = cropsize
discr_size = 70
weight_decay = 0.0
batch_decay_gen = 0.999
batch_decay_discr = 0.999
initial_learning_rate = 0.001
initial_discriminator_learning_rate = 0.001
num_workers = 1
increase_batch_size_by_factor = 1
effective_batch_size = increase_batch_size_by_factor*batch_size
save_result_every_n_batches = 25000
val_skip_n = 50
trainee_switch_skip_n = 1
max_num_since_training_change = 0
disp_select = False #Display selelected pixels upon startup
def int_shape(x):
return list(map(int, x.get_shape()))
def spectral_norm(w, iteration=1, count=0):
w0 = w
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u"+str(count),
[1, w_shape[-1]],
initializer=tf.random_normal_initializer(mean=0.,stddev=0.03),
trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
"""
power iteration
Usually iteration = 1 will be enough
"""
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
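# Hedged usage sketch (not part of the original script): spectrally normalise a
# convolution kernel before applying it, mirroring discr_conv_block further below.
# w = tf.get_variable("kernel_example", shape=[3, 3, 64, 128])
# x = tf.nn.conv2d(x, spectral_norm(w, count=999), strides=[1, 1, 1, 1], padding="SAME")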
adjusted_mse_counter = 0
def adjusted_mse(img1, img2):
    # NOTE: this early return falls back to a plain mean squared error; the
    # error-scaling code below it is left in place but never reached.
    return tf.losses.mean_squared_error(img1, img2)
def pad(tensor, size):
d1_pad = size[0]
d2_pad = size[1]
paddings = tf.constant([[0, 0], [d1_pad, d1_pad], [d2_pad, d2_pad], [0, 0]], dtype=tf.int32)
padded = tf.pad(tensor, paddings, mode="REFLECT")
return padded
def gaussian_kernel(size: int,
mean: float,
std: float,
):
"""Makes 2D gaussian Kernel for convolution."""
d = tf.distributions.Normal(mean, std)
vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))
gauss_kernel = tf.einsum('i,j->ij', vals, vals)
return gauss_kernel / tf.reduce_sum(gauss_kernel)
def blur(image):
gauss_kernel = gaussian_kernel( 1, 0., 1.5 )
#Expand dimensions of `gauss_kernel` for `tf.nn.conv2d` signature
gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis]
#Convolve
image = pad(image, (1,1))
return tf.nn.conv2d(image, gauss_kernel, strides=[1, 1, 1, 1], padding="VALID")
#Track average MSEs
adjusted_mse_counter += 1
avg = tf.get_variable(
name=f"avg-{adjusted_mse_counter}",
shape=img1.get_shape(),
initializer=3*tf.ones(img1.get_shape()))
squared_errors = (img1 - img2)**2
update_avg = tf.assign(avg, 0.999*avg + 0.001*squared_errors)
with tf.control_dependencies([update_avg]):
#Errors for px with systematically higher MSEs are increased
scale = blur(avg)
scale /= tf.reduce_mean(scale)
mse = tf.reduce_mean( scale*squared_errors )
return mse
alrc_counter = 0
def alrc(loss, num_stddev=3, decay=0.999, mu1_start=25, mu2_start=30**2):
global alrc_counter; alrc_counter += 1
#Varables to track first two raw moments of the loss
mu = tf.get_variable(
f"mu-{alrc_counter}",
initializer=tf.constant(mu1_start, dtype=tf.float32))
mu2 = tf.get_variable(
f"mu2-{alrc_counter}",
initializer=tf.constant(mu2_start, dtype=tf.float32))
#Use capped loss for moment updates to limit the effect of extreme losses on the threshold
sigma = tf.sqrt(mu2 - mu**2+1.e-8)
loss = tf.where(loss < mu+num_stddev*sigma,
loss,
loss/tf.stop_gradient(loss/(mu+num_stddev*sigma)))
#Update moments
with tf.control_dependencies([mu.assign(decay*mu+(1-decay)*loss), mu2.assign(decay*mu2+(1-decay)*loss**2)]):
return tf.identity(loss)
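# Hedged usage sketch (not part of the original script): wrap a scalar loss so
# rare, extreme values are clipped adaptively before backpropagation.
# mse = tf.reduce_mean((output - target)**2)
# loss = alrc(mse, num_stddev=3)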
capper_counter = 0
def capper_fn(x):
    # NOTE: this delegates to adaptive learning rate clipping (alrc); the
    # moment-tracking code below the return is unreachable.
    return alrc(x)
global capper_counter; capper_counter += 1
mu = tf.get_variable(f"mu-{capper_counter}", initializer=tf.constant(25, dtype=tf.float32))
mu2 = tf.get_variable(f"mu2-{capper_counter}", initializer=tf.constant(30**2, dtype=tf.float32))
def cap(x):
sigma = tf.sqrt(mu2 - mu**2+1.e-8)
capped_x = tf.cond(x < mu+3*sigma, lambda: x, lambda: x/tf.stop_gradient(x/(mu+3*sigma)))
return capped_x
x = cap(x)
with tf.control_dependencies([mu.assign(0.999*mu+0.001*x), mu2.assign(0.999*mu2+0.001*x**2)]):
return tf.cond(x <= 1, lambda: x, lambda: tf.sqrt(x + 1.e-8))
def generator_architecture(inputs, small_inputs, mask, small_mask, norm_decay, init_pass):
"""Generates fake data to try and fool the discrimator"""
with tf.variable_scope("Network", reuse=not init_pass):
def gaussian_noise(x, sigma=0.3, deterministic=False, name=''):
with tf.variable_scope(name):
if deterministic:
return x
else:
noise = tf.random_normal(shape=tf.shape(x), mean=0.0, stddev=sigma, dtype=tf.float32)
return x + noise
concat_axis = 3
def int_shape(x):
return list(map(int, x.get_shape()))
mu_counter = 0
def mean_only_batch_norm(input, decay=norm_decay, reuse_counter=None, init=init_pass):
mu = tf.reduce_mean(input, keepdims=True)
shape = int_shape(mu)
if not reuse_counter and init_pass: #Variable not being reused
nonlocal mu_counter
mu_counter += 1
running_mean = tf.get_variable("mu"+str(mu_counter),
dtype=tf.float32,
initializer=tf.constant(np.zeros(shape, dtype=np.float32)),
trainable=False)
else:
running_mean = tf.get_variable("mu"+str(mu_counter))
running_mean = decay*running_mean + (1-decay)*mu
mean_only_norm = input - running_mean
return mean_only_norm
def _actv_func(x, slope=0.01):
x = tf.nn.leaky_relu(x, slope)
return x
def get_var_maybe_avg(var_name, ema, **kwargs):
''' utility for retrieving polyak averaged params '''
v = tf.get_variable(var_name, **kwargs)
if ema is not None:
v = ema.average(v)
return v
def get_vars_maybe_avg(var_names, ema, **kwargs):
''' utility for retrieving polyak averaged params '''
vars = []
for vn in var_names:
vars.append(get_var_maybe_avg(vn, ema, **kwargs))
return vars
def mean_only_batch_norm_impl(x, pop_mean, b, is_conv_out=True, deterministic=False,
decay=norm_decay, name='meanOnlyBatchNormalization'):
'''
input comes in which is t=(g*V/||V||)*x
deterministic : separates training and testing phases
'''
with tf.variable_scope(name):
if deterministic:
# testing phase, return the result with the accumulated batch mean
return x - pop_mean + b
else:
# compute the current minibatch mean
if is_conv_out:
# using convolutional layer as input
m, _ = tf.nn.moments(x, [0,1,2])
else:
# using fully connected layer as input
m, _ = tf.nn.moments(x, [0])
# update minibatch mean variable
pop_mean_op = tf.assign(pop_mean, tf.scalar_mul(0.99, pop_mean) + tf.scalar_mul(1-0.99, m))
with tf.control_dependencies([pop_mean_op]):
return x - m + b
def batch_norm_impl(x,is_conv_out=True, deterministic=False, decay=norm_decay, name='BatchNormalization'):
with tf.variable_scope(name):
scale = tf.get_variable('scale',shape=x.get_shape()[-1],
dtype=tf.float32,initializer=tf.ones_initializer(),trainable=True)
beta = tf.get_variable('beta',shape=x.get_shape()[-1],
dtype=tf.float32,initializer=tf.zeros_initializer(),trainable=True)
pop_mean = tf.get_variable('pop_mean',shape=x.get_shape()[-1],
dtype=tf.float32,initializer=tf.zeros_initializer(), trainable=False)
pop_var = tf.get_variable('pop_var',shape=x.get_shape()[-1],
dtype=tf.float32,initializer=tf.ones_initializer(), trainable=False)
if deterministic:
return tf.nn.batch_normalization(x,pop_mean,pop_var,beta,scale,0.001)
else:
if is_conv_out:
batch_mean, batch_var = tf.nn.moments(x,[0,1,2])
else:
batch_mean, batch_var = tf.nn.moments(x,[0])
pop_mean_op = tf.assign(pop_mean, pop_mean * 0.99 + batch_mean * (1 - 0.99))
pop_var_op = tf.assign(pop_var, pop_var * 0.99 + batch_var * (1 - 0.99))
with tf.control_dependencies([pop_mean_op, pop_var_op]):
return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, scale, 0.001)
conv2d_counter = 0
def conv2d(x, num_filters, stride=1, filter_size=3, pad='SAME', nonlinearity=_actv_func, init_scale=1., init=init_pass,
use_weight_normalization=True, use_batch_normalization=False, mean_only_norm=False,
deterministic=False, slope=0.01):
filter_size = [filter_size,filter_size]
stride = [stride,stride]
'''
deterministic : used for batch normalizations (separates the training and testing phases)
'''
nonlocal conv2d_counter
conv2d_counter += 1
name = 'conv'+str(conv2d_counter)
with tf.variable_scope(name):
V = tf.get_variable('V', shape=filter_size+[int(x.get_shape()[-1]),num_filters], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
                if use_batch_normalization is False: # not using bias term when doing batch normalization, avoid indefinite growing of the bias, according to BN2015 paper
b = tf.get_variable('b', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(0.), trainable=True)
if mean_only_norm:
pop_mean = tf.get_variable('meanOnlyBatchNormalization/pop_mean',shape=[num_filters],
dtype=tf.float32, initializer=tf.zeros_initializer(),trainable=False)
if use_weight_normalization:
g = tf.get_variable('g', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(1.), trainable=True)
if init:
v_norm = tf.nn.l2_normalize(V,[0,1,2])
x = tf.nn.conv2d(x, v_norm, strides=[1] + stride + [1],padding=pad)
m_init, v_init = tf.nn.moments(x, [0,1,2])
scale_init=init_scale/tf.sqrt(v_init + 1e-08)
g = g.assign(scale_init)
b = b.assign(-m_init*scale_init)
x = tf.reshape(scale_init,[1,1,1,num_filters])*(x-tf.reshape(m_init,[1,1,1,num_filters]))
else:
W = tf.reshape(g, [1, 1, 1, num_filters]) * tf.nn.l2_normalize(V, [0, 1, 2])
if mean_only_norm: # use weight-normalization combined with mean-only-batch-normalization
x = tf.nn.conv2d(x,W,strides=[1]+stride+[1],padding=pad)
x = mean_only_batch_norm_impl(x,pop_mean,b,is_conv_out=True, deterministic=deterministic)
else:
# use just weight-normalization
x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], pad), b)
elif use_batch_normalization:
x = tf.nn.conv2d(x,V,[1]+stride+[1],pad)
x = batch_norm_impl(x,is_conv_out=True,deterministic=deterministic)
else:
x = tf.nn.bias_add(tf.nn.conv2d(x,V,strides=[1]+stride+[1],padding=pad),b)
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x, slope)
return x
deconv2d_counter = 0
def deconv2d(x, num_filters, stride=1, filter_size=3, pad='SAME', nonlinearity=_actv_func,
init_scale=1., init=init_pass,
use_weight_normalization=True, use_batch_normalization=False, mean_only_norm=True,
deterministic=False, name='', slope=0.01):
filter_size = [filter_size,filter_size]
stride = [stride,stride]
'''
deterministic : used for batch normalizations (separates the training and testing phases)
'''
nonlocal deconv2d_counter
deconv2d_counter += 1
name = 'deconv'+str(deconv2d_counter)
xs = int_shape(x)
if pad=='SAME':
target_shape = [xs[0], xs[1]*stride[0], xs[2]*stride[1], num_filters]
else:
target_shape = [xs[0], xs[1]*stride[0] + filter_size[0]-1, xs[2]*stride[1] + filter_size[1]-1, num_filters]
with tf.variable_scope(name):
V = tf.get_variable('V', shape=filter_size+[num_filters,int(x.get_shape()[-1])], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
#V = tf.get_variable('V', shape=filter_size+[int(x.get_shape()[-1]), num_filters], dtype=tf.float32,
# initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
                if use_batch_normalization is False: # not using bias term when doing batch normalization, avoid indefinite growing of the bias, according to BN2015 paper
b = tf.get_variable('b', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(0.), trainable=True)
if mean_only_norm:
pop_mean = tf.get_variable('meanOnlyBatchNormalization/pop_mean',shape=[num_filters], dtype=tf.float32, initializer=tf.zeros_initializer(),trainable=False)
if use_weight_normalization:
g = tf.get_variable('g', shape=[num_filters], dtype=tf.float32,
initializer=tf.constant_initializer(1.), trainable=True)
if init:
v_norm = tf.nn.l2_normalize(V,[0,1,2])
x = tf.nn.conv2d_transpose(x, v_norm, target_shape, strides=[1] + stride + [1],padding=pad)
m_init, v_init = tf.nn.moments(x, [0,1,2])
scale_init=init_scale/tf.sqrt(v_init + 1e-08)
g = g.assign(scale_init)
b = b.assign(-m_init*scale_init)
x = tf.reshape(scale_init,[1,1,1,num_filters])*(x-tf.reshape(m_init,[1,1,1,num_filters]))
else:
W = tf.reshape(g, [1, 1, num_filters, 1]) * tf.nn.l2_normalize(V, [0, 1, 2])
if mean_only_norm: # use weight-normalization combined with mean-only-batch-normalization
x = tf.nn.conv2d_transpose(x,W,target_shape,strides=[1]+stride+[1],padding=pad)
x = mean_only_batch_norm_impl(x,pop_mean,b,is_conv_out=True, deterministic=deterministic)
else:
# use just weight-normalization
x = tf.nn.bias_add(tf.nn.conv2d(x, W, [1] + stride + [1], pad), b)
elif use_batch_normalization:
x = tf.nn.conv2d(x,V,[1]+stride+[1],pad)
x = batch_norm_impl(x,is_conv_out=True,deterministic=deterministic)
else:
x = tf.nn.bias_add(tf.nn.conv2d(x,V,strides=[1]+stride+[1],padding=pad),b)
# apply nonlinearity
if nonlinearity is not None:
x = nonlinearity(x, slope)
return x
def xception_middle_block(input, features):
main_flow = conv2d(
x=input,
num_filters=features,
stride=1)
main_flow = conv2d(
x=main_flow,
num_filters=features,
stride=1)
main_flow = conv2d(
x=main_flow,
num_filters=features,
stride=1)
return main_flow + input
def init_batch_norm(x):
batch_mean, batch_var = tf.nn.moments(x,[0])
return (x - batch_mean) / np.sqrt( batch_var + 0.001 )
def network_in_network(input, nin_features_out, mask=None):
if use_mask:
concatenation = tf.concat(values=[input, mask], axis=concat_axis)
else:
concatenation = input
with tf.variable_scope("Inner"):
nin = conv2d(concatenation, 64, 1,
filter_size=5,
mean_only_norm=True,
use_weight_normalization=not use_mask, slope=0.1)
residuals = False
if residuals:
nin = conv2d(nin, nin_features1, 2, slope=0.1)
nin1 = nin
nin = conv2d(nin, nin_features2, 2, slope=0.1)
nin2 = nin
nin = conv2d(nin, nin_features3, 2, slope=0.1)
nin3 = nin
nin = conv2d(nin, nin_features4, 2, slope=0.1)
for _ in range(num_global_enhancer_blocks):
nin = xception_middle_block(nin, nin_features4)
nin = deconv2d(nin, nin_features3, 2)
nin += nin3
nin = deconv2d(nin, nin_features2, 2)
nin += nin2
nin = deconv2d(nin, nin_features1, 2)
nin += nin1
nin = deconv2d(nin, nin_features_out, 2)
else:
nin = conv2d(nin, nin_features1, 2)
nin = conv2d(nin, nin_features2, 2)
nin = conv2d(nin, nin_features3, 2)
nin = conv2d(nin, nin_features4, 2)
for _ in range(num_global_enhancer_blocks):
nin = xception_middle_block(nin, nin_features4)
nin = deconv2d(nin, nin_features3, 2)
nin = deconv2d(nin, nin_features2, 2)
nin = deconv2d(nin, nin_features1, 2)
nin = deconv2d(nin, nin_features_out, 2)
with tf.variable_scope("Trainer"):
inner = conv2d(nin, 64, 1)
inner = conv2d(inner, 1, 1, mean_only_norm=False, nonlinearity=None)
return nin, inner
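        # Flow below (as inferred from this code): on the initialization pass a
        # random tensor replaces the input so the weight-normalized layers get
        # data-dependent initialization; the "Inner" scope processes the
        # half-resolution input and the "Outer" scope refines it at full
        # resolution, reusing the inner features via 'nin'.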
##Model building
if not init_pass:
input = inputs
small_input = small_inputs
else:
input = tf.random_uniform(shape=int_shape(inputs), minval=-0.8, maxval=0.8)
input *= mask
small_input = tf.image.resize_images(input, (cropsize//2, cropsize//2))
with tf.variable_scope("Inner"):
if not use_mask:
nin, inner = network_in_network(small_input, gen_features1)
else:
nin, inner = network_in_network(small_input, gen_features1, small_mask)
with tf.variable_scope("Outer"):
if use_mask:
concatenation = tf.concat(values=[input, mask], axis=concat_axis)
else:
concatenation = input
enc = conv2d(x=concatenation,
num_filters=gen_features0,
stride=1,
filter_size=5,
mean_only_norm=not use_mask, slope=0.1)
enc = conv2d(enc, gen_features1, 2, slope=0.1)
enc = enc + nin
for _ in range(num_local_enhancer_blocks):
enc = xception_middle_block(enc, gen_features2)
enc = deconv2d(enc, gen_features3, 2)
enc = conv2d(enc, gen_features3, 1)
outer = conv2d(enc, 1, 1, mean_only_norm=False, nonlinearity=None)
return inner, outer
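# Multi-scale discriminator: the same convolutional tower (with spectral
# normalization) is applied to small, medium and large crops of its inputs,
# one tower per scale (see the "small", "medium" and "large" scopes below).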
def discriminator_architecture(inputs, second_input=None, phase=False, params=None,
gen_loss=0., reuse=False):
"""Three discriminators to discriminate between two data discributions"""
with tf.variable_scope("GAN/Discr", reuse=reuse):
def int_shape(x):
return list(map(int, x.get_shape()))
#phase = mode == tf.estimator.ModeKeys.TRAIN #phase is true during training
concat_axis = 3
def _instance_norm(net, train=phase):
batch, rows, cols, channels = [i.value for i in net.get_shape()]
var_shape = [channels]
mu, sigma_sq = tf.nn.moments(net, [1,2], keep_dims=True)
shift = tf.Variable(tf.zeros(var_shape), trainable=False)
scale = tf.Variable(tf.ones(var_shape), trainable=False)
epsilon = 1.e-3
normalized = (net - mu) / (sigma_sq + epsilon)**(.5)
return scale*normalized + shift
def instance_then_activ(input):
batch_then_activ = _instance_norm(input)
batch_then_activ = tf.nn.relu(batch_then_activ)
return batch_then_activ
##Reusable blocks
def _batch_norm_fn(input):
batch_norm = tf.contrib.layers.batch_norm(
input,
epsilon=0.001,
decay=0.999,
center=True,
scale=True,
is_training=phase,
fused=True,
zero_debias_moving_mean=False,
renorm=False)
return batch_norm
def batch_then_activ(input): #Changed to instance norm for stability
batch_then_activ = input#_instance_norm(input)
batch_then_activ = tf.nn.leaky_relu(batch_then_activ, alpha=0.2)
return batch_then_activ
def conv_block_not_sep(input, filters, kernel_size=3, phase=phase, batch_and_activ=True):
"""
Convolution -> batch normalisation -> leaky relu
phase defaults to true, meaning that the network is being trained
"""
conv_block = slim.conv2d(
inputs=input,
num_outputs=filters,
kernel_size=kernel_size,
padding="SAME",
activation_fn=None)
if batch_and_activ:
conv_block = batch_then_activ(conv_block)
return conv_block
def conv_block(input, filters, phase=phase):
"""
Convolution -> batch normalisation -> leaky relu
phase defaults to true, meaning that the network is being trained
"""
conv_block = strided_conv_block(input, filters, 1, 1)
return conv_block
count = 0
def discr_conv_block(input, filters, stride, rate=1, phase=phase, kernel_size=3, actv=True):
nonlocal count
count += 1
w = tf.get_variable("kernel"+str(count), shape=[kernel_size, kernel_size, input.get_shape()[-1], filters])
b = tf.get_variable("bias"+str(count), [filters], initializer=tf.constant_initializer(0.0))
x = tf.nn.conv2d(input=input, filter=spectral_norm(w, count=count),
strides=[1, stride, stride, 1], padding='VALID') + b
if actv:
x = batch_then_activ(x)
return x
def residual_conv(input, filters):
residual = slim.conv2d(
inputs=input,
num_outputs=filters,
kernel_size=1,
stride=2,
padding="SAME",
activation_fn=None)
residual = batch_then_activ(residual)
return residual
def xception_encoding_block(input, features):
cnn = conv_block(
input=input,
filters=features)
cnn = conv_block(
input=cnn,
filters=features)
cnn = strided_conv_block(
input=cnn,
filters=features,
stride=2)
residual = residual_conv(input, features)
cnn += residual
return cnn
def xception_encoding_block_diff(input, features_start, features_end):
cnn = conv_block(
input=input,
filters=features_start)
cnn = conv_block(
input=cnn,
filters=features_start)
cnn = strided_conv_block(
input=cnn,
filters=features_end,
stride=2)
residual = residual_conv(input, features_end)
cnn += residual
return cnn
def xception_middle_block(input, features):
main_flow = strided_conv_block(
input=input,
filters=features,
stride=1)
main_flow = strided_conv_block(
input=main_flow,
filters=features,
stride=1)
main_flow = strided_conv_block(
input=main_flow,
filters=features,
stride=1)
return main_flow + input
def shared_flow(input, layers):
shared = xception_encoding_block_diff(input, features2, features3)
layers.append(shared)
shared = xception_encoding_block_diff(shared, features3, features4)
layers.append(shared)
shared = xception_encoding_block(shared, features5)
layers.append(shared)
shared = xception_middle_block(shared, features5)
layers.append(shared)
shared = xception_middle_block(shared, features5)
layers.append(shared)
shared = xception_middle_block(shared, features5)
layers.append(shared)
shared = xception_middle_block(shared, features5)
layers.append(shared)
return shared, layers
def terminating_fc(input):
fc = tf.reduce_mean(input, [1,2])
fc = tf.reshape(fc, (-1, features5))
fc = tf.contrib.layers.fully_connected(inputs=fc,
num_outputs=1,
activation_fn=None)
return fc
def max_pool(input, size=2, stride=2):
pool = tf.contrib.layers.max_pool2d(inputs=input,
kernel_size=size,
stride=stride,
padding='SAME')
return pool
testing_scale = 1
features1 = 64 // testing_scale
features2 = 128 // testing_scale
features3 = 256 // testing_scale
features4 = 512 // testing_scale
def discriminate(x):
"""Discriminator architecture"""
x = discr_conv_block(x, features1, 2, 1, kernel_size=4)
x = discr_conv_block(x, features2, 2, 1, kernel_size=4)
x = discr_conv_block(x, features3, 2, 1, kernel_size=4)
#x = discr_conv_block(x, features3, 1, 1, kernel_size=4)
x = discr_conv_block(x, features4, 2, 1, kernel_size=4)
x = tf.reduce_sum(x, axis=[1,2,3])
#shape = int_shape(x)
#x = tf.reshape(x, (-1, shape[1]*shape[2]*shape[3]))
#x = tf.contrib.layers.fully_connected(
# inputs=x, num_outputs=1, biases_initializer=None, activation_fn=None)
return x
'''Model building'''
with tf.variable_scope("small", reuse=reuse) as small_scope:
small = inputs[0]
small = discriminate(small)
with tf.variable_scope("medium", reuse=reuse) as medium_scope:
medium = inputs[1]
medium = discriminate(medium)
with tf.variable_scope("large", reuse=reuse) as large_scope:
large = inputs[2]
large = discriminate(large)
discriminations = []
for x in [small, medium, large]:
clipped = x#tf.clip_by_value(x, clip_value_min=0, clip_value_max=1000) #5*l2_norm
discriminations.append( clipped )
return discriminations
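# experiment() wires the generator and the three discriminators together,
# builds the MSE and least-squares adversarial losses, and returns the
# training ops and diagnostic tensors consumed by main().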
def experiment(feature, ground_truth, mask, learning_rate_ph, discr_lr_ph, beta1_ph,
discr_beta1_ph, norm_decay, train_outer_ph, ramp_ph, initialize):
def pad(tensor, size):
d1_pad = size[0]
d2_pad = size[1]
paddings = tf.constant([[0, 0], [d1_pad, d1_pad], [d2_pad, d2_pad], [0, 0]], dtype=tf.int32)
padded = tf.pad(tensor, paddings, mode="REFLECT")
return padded
def gaussian_kernel(size: int,
mean: float,
std: float,
):
"""Makes 2D gaussian Kernel for convolution."""
d = tf.distributions.Normal(mean, std)
vals = d.prob(tf.range(start = -size, limit = size + 1, dtype = tf.float32))
gauss_kernel = tf.einsum('i,j->ij', vals, vals)
return gauss_kernel / tf.reduce_sum(gauss_kernel)
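    # For example, gaussian_kernel(2, 0., 2.5) yields a (2*2+1) x (2*2+1) = 5x5
    # kernel normalized to sum to 1; blur() below pairs it with reflective
    # padding of 2 so that the "VALID" convolution preserves the image size.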
def blur(image):
gauss_kernel = gaussian_kernel( 2, 0., 2.5 )
#Expand dimensions of `gauss_kernel` for `tf.nn.conv2d` signature
gauss_kernel = gauss_kernel[:, :, tf.newaxis, tf.newaxis]
#Convolve
image = pad(image, (2,2))
return tf.nn.conv2d(image, gauss_kernel, strides=[1, 1, 1, 1], padding="VALID")
def get_multiscale_crops(input, multiscale_channels=1):
"""Assumes square inputs"""
input = pad(input, (2*discr_size, 2*discr_size)) #Extra padding to reduce periodic artefacts
s = int_shape(input)
small = tf.random_crop(
input,
size=(batch_size, discr_size, discr_size, multiscale_channels))
small = tf.image.resize_images(small, (discr_size, discr_size))
medium = tf.random_crop(
input,
size=(batch_size, 2*discr_size, 2*discr_size, multiscale_channels))
medium = tf.image.resize_images(medium, (discr_size, discr_size))
large = tf.random_crop(
input,
size=(batch_size, 4*discr_size, 4*discr_size, multiscale_channels))
large = tf.image.resize_images(large, (discr_size, discr_size))
return small, medium, large
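    # Below, the generator output, the ground truth and the interpolated X_hat
    # are concatenated along the channel axis before cropping, so all three are
    # cropped at exactly the same random positions and scales.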
#Generator
feature = tf.reshape(feature, [-1, cropsize, cropsize, channels])
feature_small = tf.image.resize_images(feature, (cropsize//2, cropsize//2))
truth = tf.reshape(ground_truth, [-1, cropsize, cropsize, channels])
truth_small = tf.image.resize_images(truth, (cropsize//2, cropsize//2))
small_mask = tf.image.resize_images(mask, (cropsize//2, cropsize//2))
if initialize:
print("Started initialization")
_, _ = generator_architecture(
feature, feature_small, mask, small_mask, norm_decay, init_pass=True)
print("Initialized")
output_inner, output_outer = generator_architecture(
feature, feature_small, mask, small_mask, norm_decay, init_pass=False)
print("Architecture ready")
#Blurred images
blur_truth_small = blur(truth_small)
blur_output_inner = blur(output_inner)
blur_truth = blur(truth)
blur_output_outer = blur(output_outer)
#Trainable parameters
model_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Network")
model_params_inner = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Network/Inner/Inner")
model_params_trainer = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Network/Inner/Trainer")
model_params_outer = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="Network/Outer")
##Discriminators
#Intermediate image for gradient penalty calculation
epsilon = tf.random_uniform(
shape=[2, 1, 1, 1, 1],
minval=0.,
maxval=1.)
X_hat_outer = (1-epsilon[0])*truth + epsilon[0]*output_outer
X_hat_inner = (1-epsilon[1])*blur_truth_small + epsilon[1]*output_inner
discr_inputs_outer = [output_outer, truth, X_hat_outer]
discr_inputs_inner = [output_inner, blur_truth_small, X_hat_inner]
#Crop images at multiple scales at the same places for each scale
concat_outer = tf.concat(discr_inputs_outer, axis=3)
concat_inner = tf.concat(discr_inputs_inner, axis=3)
num_channels_outer = len(discr_inputs_outer)
num_channels_inner = len(discr_inputs_inner)
multiscale_crops_outer = get_multiscale_crops(concat_outer, multiscale_channels=num_channels_outer)
multiscale_crops_inner = get_multiscale_crops(concat_inner, multiscale_channels=num_channels_inner)
multiscale_crops_outer = [tf.unstack(crop, axis=3) for crop in multiscale_crops_outer]
multiscale_crops_inner = [tf.unstack(crop, axis=3) for crop in multiscale_crops_inner]
#Sort crops into categories
shape = (batch_size, discr_size, discr_size, channels)
crops_set_outer = []
for crops in multiscale_crops_outer:
crops_set_outer.append( [tf.reshape(unstacked, shape) for unstacked in crops] )
crops_set_inner = []
for crops in multiscale_crops_inner:
crops_set_inner.append( [tf.reshape(unstacked, shape) for unstacked in crops] )
#Get intermediate representations
multiscale_xhat_outer = [m[2] for m in crops_set_outer]
multiscale_xhat_inner = [m[2] for m in crops_set_inner]
#Concatenate so the crops can be processed as a single batch
multiscale_outer = []
for crops in crops_set_outer:
multiscale_outer.append( tf.concat(crops, axis=0) )
multiscale_inner = []
for crops in crops_set_inner:
multiscale_inner.append( tf.concat(crops, axis=0) )
_discrimination_outer = discriminator_architecture( multiscale_outer )
_discrimination_inner = discriminator_architecture( multiscale_inner, reuse=True )
model_params_discr_small = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="GAN/Discr/small")
model_params_discr_medium = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="GAN/Discr/medium")
model_params_discr_large = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="GAN/Discr/large")
model_params_discrs = [model_params_discr_small,
model_params_discr_medium,
model_params_discr_large]
#Separate batch into discrimination categories
discr_of_output_outer = [d[0] for d in _discrimination_outer]
discr_of_truth = [d[1] for d in _discrimination_outer]
discr_of_X_hat_outer = [d[2] for d in _discrimination_outer]
discr_of_output_inner = [d[0] for d in _discrimination_inner]
discr_of_truth_small = [d[1] for d in _discrimination_inner]
discr_of_X_hat_inner = [d[2] for d in _discrimination_inner]
pred_real_outer = 0.
pred_fake_outer = 0.
avg_d_grads_outer = 0.
d_losses_outer = []
pred_real_inner = 0.
pred_fake_inner = 0.
avg_d_grads_inner = 0.
d_losses_inner = []
wass_weight = 1.
gradient_penalty_weight = 10.
l2_inner_weight = 5.e-5
l2_outer_weight = 5.e-5
def get_gradient_penalty(_discr_of_X_hat, _multiscale_xhat):
grad_D_X_hat = tf.gradients(_discr_of_X_hat, [_multiscale_xhat])[0]
red_idx = [i for i in range(2, _multiscale_xhat.shape.ndims)]
slopes = tf.sqrt(1.e-8+tf.reduce_sum(tf.square(grad_D_X_hat), axis=red_idx))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
return gradient_penalty
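    # Note: in the loop below the gradient penalties are currently hard-coded
    # to 0 (the WGAN-GP terms are disabled) and least-squares GAN losses are
    # used instead: (D(real)-1)^2 + D(fake)^2 for the discriminators and
    # (D(fake)-1)^2 for the generator.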
##Losses and train ops
wass_loss_for_gen_outer = 0.
wass_loss_for_gen_inner = 0.
wass_loss_for_discr_outer = 0.
wass_loss_for_discr_inner = 0.
for i in range(3): #Discrimination is on 3 scales
#wasserstein_loss_outer = discr_of_output_outer[i] - discr_of_truth[i]
#wasserstein_loss_inner = discr_of_output_inner[i] - discr_of_truth_small[i]
#wass_loss_for_discr_outer += wasserstein_loss_outer
#wass_loss_for_discr_inner += wasserstein_loss_inner
#wass_loss_for_gen_outer += -discr_of_output_outer[i]
#wass_loss_for_gen_inner += -discr_of_output_inner[i]
gradient_penalty_outer = 0.#get_gradient_penalty(discr_of_X_hat_outer[i], multiscale_xhat_outer[i])
gradient_penalty_inner = 0.#get_gradient_penalty(discr_of_X_hat_inner[i], multiscale_xhat_inner[i])
wasserstein_loss_outer = tf.pow(discr_of_truth[i]-1., 2) + tf.pow(discr_of_output_outer[i], 2)
wasserstein_loss_inner = tf.pow(discr_of_truth_small[i]-1., 2) + tf.pow(discr_of_output_inner[i], 2)
wass_loss_for_discr_outer += wasserstein_loss_outer
wass_loss_for_discr_inner += wasserstein_loss_inner
wass_loss_for_gen_outer += tf.pow(discr_of_output_outer[i]-1., 2)
wass_loss_for_gen_inner += tf.pow(discr_of_output_inner[i]-1, 2)
pred_real_outer += discr_of_truth[i]
pred_fake_outer += discr_of_output_outer[i]
avg_d_grads_outer += gradient_penalty_outer
pred_real_inner += discr_of_truth_small[i]
pred_fake_inner += discr_of_output_inner[i]
avg_d_grads_inner += gradient_penalty_inner
d_loss_outer = wass_weight*wasserstein_loss_outer + gradient_penalty_weight*gradient_penalty_outer
d_loss_inner = wass_weight*wasserstein_loss_inner + gradient_penalty_weight*gradient_penalty_inner
d_losses_outer.append(d_loss_outer)
d_losses_inner.append(d_loss_inner)
mse_inner = 200*adjusted_mse(blur_truth_small, output_inner)
mse_inner = capper_fn(mse_inner)
#mse_inner = 2.*tf.cond( mse_inner < 1, lambda: mse_inner, lambda: tf.sqrt(mse_inner+1.e-8) )
#mse_inner = tf.minimum(mse_inner, 50)
mse_outer = 200*adjusted_mse(blur_truth, output_outer)
mse0 = tf.reduce_mean( (blur_truth - output_outer)**2 )
mse_outer = capper_fn(mse_outer)
#mse_outer = 2.*tf.cond( mse_outer < 1, lambda: mse_outer, lambda: tf.sqrt(mse_outer+1.e-8) )
    #mse_outer = tf.minimum(mse_outer, 50) #Safeguard against error spikes
mse_outer_together = 200*adjusted_mse(blur_truth, blur_output_outer)
mse_outer_together = capper_fn(mse_outer_together)
#mse_outer_together = 2.*tf.cond( mse_outer < 1, lambda: mse_outer, lambda: tf.sqrt(mse_outer+1.e-8) )
#mse_inner = 10*tf.reduce_mean(tf.abs( blur_truth_small - blur_output_inner ))
#mse_outer = 10*tf.reduce_mean(tf.abs( blur_truth - blur_output_outer ))
loss = mse_outer_together + wass_loss_for_gen_outer
loss_inner = mse_inner
loss_outer = mse_outer
train_ops_discr = []
for i in range(3):
d_loss = tf.cond( train_outer_ph, lambda: d_losses_outer[i], lambda: d_losses_inner[i] )
d_train_op = tf.train.AdamOptimizer(discr_lr_ph, 0.9).minimize(
d_loss, var_list=model_params_discrs[i])
train_ops_discr.append(d_train_op)
#Provision inner network with an ancillary loss tower
train_op_trainer = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
2*loss_inner, var_list=model_params_trainer)
train_op_inner_start = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
loss_inner+loss_outer, var_list=model_params_inner)
train_op_inner_end = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
loss_inner+loss, var_list=model_params_inner)
train_op_outer_start = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
loss_outer, var_list=model_params_outer)
train_op_outer_end = tf.train.AdamOptimizer(learning_rate_ph, 0.9).minimize(
loss, var_list=model_params_outer)
start_train_ops = [train_op_inner_start, train_op_outer_start, train_op_trainer]
end_train_ops = [train_op_inner_end, train_op_outer_end, train_op_trainer]
errors = tf.to_double((100*blur_truth - 100*output_outer)**2)
return {'start_train_ops': start_train_ops,
'end_train_ops': end_train_ops,
'train_ops_discr': train_ops_discr,
'output_inner': output_inner,
'output_outer': output_outer,
'mse_inner': mse_inner,
'mse_outer': mse_outer,
'wass_loss_inner': wass_loss_for_gen_inner,
'wass_loss_outer': wass_loss_for_gen_outer,
'wass_loss_d_inner': wass_loss_for_discr_inner,
'wass_loss_d_outer': wass_loss_for_discr_outer,
'errors': errors,
"mse0": mse0
}
def flip_rotate(img):
"""Applies a random flip || rotation to the image, possibly leaving it unchanged"""
choice = np.random.randint(0, 8)
if choice == 0:
return img
if choice == 1:
return np.rot90(img, 1)
if choice == 2:
return np.rot90(img, 2)
if choice == 3:
return np.rot90(img, 3)
if choice == 4:
return np.flip(img, 0)
if choice == 5:
return np.flip(img, 1)
if choice == 6:
return np.flip(np.rot90(img, 1), 0)
if choice == 7:
return np.flip(np.rot90(img, 1), 1)
def load_image(addr, resize_size=cropsize, img_type=np.float32):
"""Read an image and make sure it is of the correct type. Optionally resize it"""
#addr = "Z:/Jeffrey-Ede/models/stem-random-walk-nin-20-1/truth-1000.tif"
try:
img = imread(addr, mode='F')
except:
img = np.zeros((cropsize,cropsize))
print("Image read failed")
if resize_size and resize_size != cropsize:
img = cv2.resize(img, (resize_size, resize_size), interpolation=cv2.INTER_AREA)
return img.astype(img_type)
def scale0to1(img):
"""Rescale image between 0 and 1"""
img = img.astype(np.float32)
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.5)
else:
img = (img-min) / (max-min)
return img.astype(np.float32)
def norm_img(img):
min = np.min(img)
max = np.max(img)
if np.absolute(min-max) < 1.e-6:
img.fill(0.)
else:
a = 0.5*(min+max)
b = 0.5*(max-min)
img = (img-a) / b
return img.astype(np.float32)
def preprocess(img):
img[np.isnan(img)] = 0.
img[np.isinf(img)] = 0.
img = norm_img(img)
return img
def gen_random_walk(channel_width, channel_height=cropsize, amplitude=1, beta1=0., shift=0., steps=10):
walk = np.zeros((int(np.ceil(channel_width+shift)), channel_height))
halfway = (channel_width-1)/2
center = halfway+shift
size = int(np.ceil(channel_width+shift))
mom = 0.
y = 0.
for i in range(channel_height):
y1 = y
#Get new position and adjust momentum
step_y = random.randint(0, 1)
if step_y == 1:
mom = beta1*mom + (1-beta1)*amplitude*(1 + np.random.normal())
y += mom
else:
y = amplitude*(-1 + np.random.normal())
if y < -halfway:
y = -halfway
mom = -mom
elif y > halfway:
y = halfway
mom = -mom
#Move to position in steps
y2 = y
scale = np.sqrt(1+(y2-y1)**2)
for j in range(steps):
x = (j+1)/steps
y = (y2-y1)*x + y1
y_idx = center+y
if y_idx != np.ceil(y_idx):
if int(y_idx) < size:
walk[int(y_idx), i] += scale*(np.ceil(y_idx) - y_idx)/steps
if int(y_idx)+1 < size:
walk[int(y_idx)+1, i] += scale*(1.-(np.ceil(y_idx) - y_idx))/steps
else:
walk[int(y_idx), i] = scale*1
return walk, size
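# gen_random_walk() and the commented-out make_mask() below belong to an
# earlier random-walk scan mask; the active make_mask() further down builds
# the mask from the spiral path returned by inspiral() instead.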
#def make_mask(use_frac, amp, steps):
# channel_size = (2+np.sqrt(4-4*4*use_frac)) / (2*use_frac)
# num_channels = cropsize / channel_size
# mask = np.zeros( (cropsize, cropsize) )
# for i in range( int(num_channels) ):
# shift = i*channel_size - np.floor(i*channel_size)
# walk, size = gen_random_walk(channel_width=channel_size, amplitude=amp, beta1=0.5, shift=shift, steps=steps)
# lower_idx = np.floor(i*channel_size)
# upper_idx = int(lower_idx)+size
# if upper_idx < cropsize:
# mask[int(lower_idx):upper_idx, :] = walk
# else:
# diff = int(upper_idx)-int(cropsize)
# mask[int(lower_idx):int(upper_idx)-diff, :] = walk[0:(size-diff), :]
# return mask
def make_mask(use_frac):
mask = inspiral(use_frac, cropsize)
return mask
def fill(data, invalid=None):
"""
Replace the value of invalid 'data' cells (indicated by 'invalid')
by the value of the nearest valid data cell
Input:
data: numpy array of any dimension
invalid: a binary array of same shape as 'data'. True cells set where data
value should be replaced.
If None (default), use: invalid = np.isnan(data)
Output:
Return a filled array.
"""
#import numpy as np
#import scipy.ndimage as nd
if invalid is None: invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid, return_distances=False, return_indices=True)
return data[tuple(ind)]
def gen_lq(img0):
img = norm_img(cv2.GaussianBlur(img0,(5,5), 2.5))
steps = 25
use_frac = 1/16
amp = 5.
mask = make_mask(use_frac)
#mask = mask.clip(0., 1.)
#print(np.sum(mask)/(512**2))
select = mask > 0
    #Combine with uniform noise; low detection-time data is less meaningful
detection = mask*img0#mask * ( mask*img0 + 2*(1-mask)*np.random.rand(*img0.shape)*img )
lq = -np.ones(img.shape)
lq[select] = detection[select]
lq = scale0to1(lq)
lq = fill(lq, invalid=np.logical_not(mask.astype(np.bool)))
#Changed img to img0 halfway through training
return img0.astype(np.float32), lq.astype(np.float32), mask.astype(np.float32)
def inspiral(coverage, side, num_steps=10_000):
"""Duration spent at each location as a particle falls in a magnetic
field. Trajectory chosen so that the duration density is (approx.)
evenly distributed. Trajectory is calculated stepwise.
Args:
coverage: Average amount of time spent at a random pixel
        side: Side length of the square image that the motion is
inscribed on.
Returns:
Amounts of time spent at each pixel on a square image as a charged
particle inspirals.
"""
#Use size that is larger than the image
size = int(np.ceil(np.sqrt(2)*side))
#Maximum radius of motion
R = size/2
#Get constant in equation of motion
k = 1/ (2*np.pi*coverage)
#Maximum theta that is in the image
theta_max = R / k
#Equispaced steps
theta = np.arange(0, theta_max, theta_max/num_steps)
r = k * theta
#Convert to cartesian, with (0,0) at the center of the image
x = r*np.cos(theta) + R
y = r*np.sin(theta) + R
#Draw spiral
z = np.empty((x.size + y.size,), dtype=x.dtype)
z[0::2] = x
z[1::2] = y
z = list(z)
img = Image.new('F', (size,size), "black")
img_draw = ImageDraw.Draw(img)
img_draw = img_draw.line(z)
img = np.asarray(img)
img = img[size//2-side//2:size//2+side//2+side%2,
size//2-side//2:size//2+side//2+side%2]
#Blur path
#img = cv2.GaussianBlur(img,(3,3),0)
return img
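# Illustrative (hypothetical) call: inspiral(1/16, cropsize) returns a
# cropsize x cropsize float array whose values are proportional to the dwell
# time along an Archimedean spiral (r = k*theta), with roughly a 1/16 average
# coverage; gen_lq() uses it this way via make_mask().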
def record_parser(record):
"""Parse files and generate lower quality images from them."""
img = flip_rotate(preprocess(load_image(record)))
img, lq, mask = gen_lq(img)
if np.sum(np.isfinite(img)) != cropsize**2 or np.sum(np.isfinite(lq)) != cropsize**2:
img = np.zeros((cropsize,cropsize))
lq = mask*img
return lq, img, mask
def reshaper(img1, img2, img3):
img1 = tf.reshape(img1, [cropsize, cropsize, channels])
img2 = tf.reshape(img2, [cropsize, cropsize, channels])
img3 = tf.reshape(img3, [cropsize, cropsize, channels])
return img1, img2, img3
def input_fn(dir, subset, batch_size, num_shards):
"""Create a dataset from a list of filenames and shard batches from it"""
with tf.device('/cpu:0'):
dataset = tf.data.Dataset.list_files(dir+subset+"/"+"*.tif")
dataset = dataset.shuffle(buffer_size=shuffle_buffer_size)
dataset = dataset.repeat(num_epochs)
dataset = dataset.map(
lambda file: tf.py_func(record_parser, [file], [tf.float32, tf.float32, tf.float32]),
num_parallel_calls=num_parallel_calls)
#print(dataset.output_shapes, dataset.output_types)
dataset = dataset.map(reshaper, num_parallel_calls=num_parallel_calls)
#print(dataset.output_shapes, dataset.output_types)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(buffer_size=prefetch_buffer_size)
iter = dataset.make_one_shot_iterator()
img_batch = iter.get_next()
if num_shards <= 1:
# No GPU available or only 1 GPU.
return [img_batch[0]], [img_batch[1]], [img_batch[2]]
else:
image_batch = tf.unstack(img_batch, num=batch_size, axis=1)
            feature_shards = [[] for i in range(num_shards)]
            feature_shards_truth = [[] for i in range(num_shards)]
            feature_shards_mask = [[] for i in range(num_shards)]
            for i in range(batch_size):
                idx = i % num_shards
                # Each example unstacks into three tensors: (lq, truth, mask)
                tensors = tf.unstack(image_batch[i], num=3, axis=0)
                feature_shards[idx].append(tensors[0])
                feature_shards_truth[idx].append(tensors[1])
                feature_shards_mask[idx].append(tensors[2])
feature_shards = [tf.parallel_stack(x) for x in feature_shards]
feature_shards_truth = [tf.parallel_stack(x) for x in feature_shards_truth]
feature_shards_mask = [tf.parallel_stack(x) for x in feature_shards_mask]
return feature_shards, feature_shards_truth, feature_shards_mask
def disp(img):
cv2.namedWindow('CV_Window', cv2.WINDOW_NORMAL)
cv2.imshow('CV_Window', scale0to1(img))
cv2.waitKey(0)
return
if disp_select:
disp(select)
class RunConfig(tf.contrib.learn.RunConfig):
def uid(self, whitelist=None):
"""
Generates a 'Unique Identifier' based on all internal fields.
Caller should use the uid string to check `RunConfig` instance integrity
        in one session use, but should not rely on the implementation details,
        which are subject to change.
Args:
whitelist: A list of the string names of the properties uid should not
include. If `None`, defaults to `_DEFAULT_UID_WHITE_LIST`, which
                includes most properties a user is allowed to change.
Returns:
A uid string.
"""
if whitelist is None:
whitelist = run_config._DEFAULT_UID_WHITE_LIST
state = {k: v for k, v in self.__dict__.items() if not k.startswith('__')}
# Pop out the keys in whitelist.
for k in whitelist:
state.pop('_' + k, None)
ordered_state = collections.OrderedDict(
sorted(state.items(), key=lambda t: t[0]))
# For class instance without __repr__, some special cares are required.
# Otherwise, the object address will be used.
if '_cluster_spec' in ordered_state:
ordered_state['_cluster_spec'] = collections.OrderedDict(
sorted(ordered_state['_cluster_spec'].as_dict().items(), key=lambda t: t[0]))
return ', '.join(
'%s=%r' % (k, v) for (k, v) in six.iteritems(ordered_state))
def sigmoid(x,shift=0,mult=1):
return 1 / (1 + np.exp(-(x+shift)*mult))
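# For example, sigmoid(0.) == 0.5; 'shift' translates the curve along x and
# 'mult' controls its steepness.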
def main(job_dir, data_dir, variable_strategy, num_gpus, log_device_placement,
num_intra_threads, **hparams):
tf.reset_default_graph()
temp = set(tf.all_variables())
with open(log_file, 'a') as log:
log.flush()
# The env variable is on deprecation path, default is set to off.
os.environ['TF_SYNC_ON_FINISH'] = '0'
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'
#with tf.device("/cpu:0"):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) #For batch normalisation windows
with tf.control_dependencies(update_ops):
# Session configuration.
sess_config = tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,#Once placement is correct, this fills up too much of the cmd window...
intra_op_parallelism_threads=num_intra_threads,
gpu_options=tf.GPUOptions(force_gpu_compatible=True, allow_growth=True))
config = RunConfig(
session_config=sess_config, model_dir=job_dir)
hparams=tf.contrib.training.HParams(
is_chief=config.is_chief,
**hparams)
img, img_truth, img_mask = input_fn(data_dir, 'test', batch_size, num_gpus)
img_val, img_truth_val, img_mask_val = input_fn(data_dir, 'test', batch_size, num_gpus)
with tf.Session(config=sess_config) as sess:
print("Session started")
sess.run(tf.initialize_variables(set(tf.all_variables()) - temp))
temp = set(tf.all_variables())
____img, ____img_truth, ____img_mask = sess.run([img, img_truth, img_mask])
img_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img')
for i in ____img]
img_truth_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img_truth')
for i in ____img_truth]
img_mask_ph = [tf.placeholder(tf.float32, shape=i.shape, name='img_mask')
for i in ____img_truth]
is_training = True
print("Dataflow established")
learning_rate_ph = tf.placeholder(tf.float32, name='learning_rate')
discr_learning_rate_ph = tf.placeholder(tf.float32, name='discr_learning_rate')
beta1_ph = tf.placeholder(tf.float32, shape=(), name='beta1')
discr_beta1_ph = tf.placeholder(tf.float32, shape=(), name='discr_beta1')
norm_decay_ph = tf.placeholder(tf.float32, shape=(), name='norm_decay')
train_outer_ph = tf.placeholder(tf.bool, name='train_outer')
ramp_ph = tf.placeholder(tf.float32, name='ramp')
#########################################################################################
exp_dict = experiment(img_ph[0], img_truth_ph[0], img_mask_ph[0],
learning_rate_ph, discr_learning_rate_ph,
beta1_ph, discr_beta1_ph, norm_decay_ph,
train_outer_ph, ramp_ph, initialize=True)
print("Created experiment")
sess.run( tf.initialize_variables( set(tf.all_variables())-temp),
feed_dict={beta1_ph: np.float32(0.9), discr_beta1_ph: np.float32(0.5)} )
train_writer = tf.summary.FileWriter( logDir, sess.graph )
#print(tf.all_variables())
saver = tf.train.Saver(max_to_keep=1)
#saver.restore(sess, tf.train.latest_checkpoint(model_dir+"model/"))
saver.restore(sess, tf.train.latest_checkpoint(model_dir+"notable_ckpts/"))
counter = 0
val_counter = 0
save_counter = counter
counter_init = counter+1
base_rate = 0.0001
bad_buffer_size = 50
bad_buffer_truth = []
bad_buffer = []
bad_buffer_mask = []
for _ in range(bad_buffer_size):
lq, buffer_img, mask = sess.run([img, img_truth, img_mask])
bad_buffer_truth.append(buffer_img)
bad_buffer.append(lq)
bad_buffer_mask.append(mask)
bad_buffer_prob = 0.2
bad_buffer_beta = 0.99
bad_buffer_thresh = 0.
bad_buffer_tracker = bad_buffer_prob
bad_buffer_tracker_beta = 0.99
bad_buffer_num_uses = 1
#Here our 'natural' statistics are MSEs
nat_stat_mean_beta = 0.99
nat_stat_std_dev_beta = 0.99
nat_stat_mean = 1.5
nat_stat2_mean = 4.
total_iters = 1_000_000
discr_beta1 = 0.5
discr_learning_rate = 0.0001
wass_iter = 1
train_discr_per_gen = 1 #Number of discriminator training ops per generator training op
num_steps_in_lr_decay = 8
mses = []
max_count = 50
total_errors = None
print("Starting training")
while True:
#Train for a couple of hours
time0 = time.time()
while time.time()-time0 < modelSavePeriod:
if not val_counter % val_skip_n:
val_counter = 0
val_counter += 1
                        if val_counter % val_skip_n: #Only increment on non-validation iterations
if not wass_iter % train_discr_per_gen:
counter += 1
wass_iter = 1
gen_train = True
else:
gen_train = False
wass_iter += 1
if counter < 0.25*total_iters:
rate = 3*base_rate
beta1 = 0.9
elif counter < 0.5*total_iters:
len_iters = 0.25*total_iters
rel_iters = counter - 0.25*total_iters
step = int(num_steps_in_lr_decay*rel_iters/len_iters)
rate = 3*base_rate * (1 - step/num_steps_in_lr_decay)
beta1 = 0.9 - 0.4*step/num_steps_in_lr_decay
#elif counter == total_iters//2:
# saver.save(sess, save_path=model_dir+"model/model", global_step=counter)
# quit()
elif counter < 0.75*total_iters:
rate = base_rate
beta1 = 0.5
elif counter < total_iters:
#Stepped linear decay
rel_iters = counter - 0.75*total_iters
step = int(num_steps_in_lr_decay*rel_iters/(0.25*total_iters))
rate = base_rate * ( 1. - step/num_steps_in_lr_decay )
beta1 = 0.5
if counter in [total_iters//2, total_iters]:
saver.save(sess, save_path=model_dir+"notable_ckpts/model", global_step=counter)
#if counter == total_iters:
quit()
learning_rate = np.float32(rate)
if counter < 0.5*total_iters:
norm_decay = 0.99
else:
norm_decay = 1.
ramp = 1.
train_outer = True
base_dict = { learning_rate_ph: learning_rate,
discr_learning_rate_ph: np.float32(discr_learning_rate),
beta1_ph: np.float32(beta1),
discr_beta1_ph: np.float32(discr_beta1),
norm_decay_ph: np.float32(norm_decay),
train_outer_ph: np.bool(train_outer),
ramp_ph: np.float32(ramp)
}
use_buffer = False#np.random.rand() < bad_buffer_num_uses*bad_buffer_prob
if use_buffer:
idx = np.random.randint(0, bad_buffer_size)
_img = bad_buffer[idx]
_img_truth = bad_buffer_truth[idx]
_img_mask = bad_buffer_mask[idx]
print("From buffer")
else:
_img, _img_truth, _img_mask = sess.run([img, img_truth, img_mask])
#disp(_img_mask[0][0])
dict = base_dict.copy()
dict.update( { img_ph[0]: _img[0], img_truth_ph[0]: _img_truth[0], img_mask_ph[0]: _img_mask[0] } )
if counter < max_count:
print(f"Iter: {counter}")
final_output = sess.run(exp_dict["output_outer"], feed_dict=dict)
Image.fromarray(_img[0].reshape(cropsize, cropsize).astype(np.float32)).save(
model_dir+f"partial_scan-{counter}.tif" )
Image.fromarray((0.5*final_output+0.5).reshape(cropsize, cropsize).astype(np.float32)).save(
model_dir+f"output-{counter}.tif" )
Image.fromarray((0.5*_img_truth[0]+0.5).reshape(cropsize, cropsize).astype(np.float32)).save(
model_dir+f"truth-{counter}.tif" )
Image.fromarray(_img_mask[0].reshape(cropsize, cropsize).astype(np.float32)).save(
model_dir+f"mask-{counter}.tif" )
else:
quit()
#if counter < 0.5*total_iters:
# train_ops = exp_dict['start_train_ops']
#else:
# train_ops = exp_dict['end_train_ops'] if gen_train else []
# train_ops += exp_dict['train_ops_discr']
#other_ops = [exp_dict['mse_inner'], exp_dict['mse_outer'], exp_dict['wass_loss_outer'], exp_dict['wass_loss_d_outer']]
#output_ops = [exp_dict['output_outer']]
#output_size = cropsize
##Save outputs occasionally
#if 0 <= counter <= 1 or not counter % save_result_every_n_batches or (0 <= counter < 10000 and not counter % 1000) or counter == counter_init:
# #Don't train on validation examples
# if not val_counter % val_skip_n:
# results = sess.run( other_ops + output_ops, feed_dict=dict )
# else:
# results = sess.run( other_ops + output_ops + train_ops, feed_dict=dict )
# mse_in = results[0]
# mse = results[1]
# wass_loss = results[2]
# wass_d_loss = results[3]
# output = results[len(other_ops)]
# try:
# save_input_loc = model_dir+"input-"+str(counter)+".tif"
# save_truth_loc = model_dir+"truth-"+str(counter)+".tif"
# save_output_loc = model_dir+"output-"+str(counter)+".tif"
# save_mask_loc = model_dir+"mask-"+str(counter)+".tif"
# Image.fromarray((_img[0]).reshape(cropsize, cropsize).astype(np.float32)).save( save_input_loc )
# Image.fromarray((0.5*_img_truth[0]+0.5).reshape(cropsize, cropsize).astype(np.float32)).save( save_truth_loc )
# Image.fromarray((0.5*output+0.5).reshape(output_size, output_size).astype(np.float32)).save( save_output_loc )
# Image.fromarray((_img_mask[0]).reshape(cropsize, cropsize).astype(np.float32)).save( save_mask_loc )
# except:
# print("Image save failed")
#else:
# #Don't train on validation examples
# if not val_counter % val_skip_n:
# results = sess.run( other_ops, feed_dict=dict )
# else:
# results = sess.run( other_ops + train_ops, feed_dict=dict )
# mse_in = results[0]
# mse = results[1]
# wass_loss = results[2]
# wass_d_loss = results[3]
#nat_stat_mean = (nat_stat_mean_beta*nat_stat_mean +
# (1.-nat_stat_mean_beta)*mse)
#nat_stat2_mean = (nat_stat_std_dev_beta*nat_stat2_mean +
# (1.-nat_stat_std_dev_beta)*mse**2)
#nat_stat_std_dev = np.sqrt(nat_stat2_mean - nat_stat_mean**2)
##Decide whether or not to add to buffer using natural statistics
#if not use_buffer and mse > bad_buffer_thresh:
# idx = np.random.randint(0, bad_buffer_size)
# bad_buffer[idx] = _img
# bad_buffer_truth[idx] = _img_truth
# bad_buffer_mask[idx] = _img_mask
# bad_buffer_tracker = ( bad_buffer_tracker_beta*bad_buffer_tracker +
# (1.-bad_buffer_tracker_beta) )
# print("To buffer")#, bad_buffer_thresh, bad_buffer_prob, bad_buffer_tracker)
#else:
# bad_buffer_tracker = bad_buffer_tracker_beta*bad_buffer_tracker
#if bad_buffer_tracker < bad_buffer_prob:
# step = nat_stat_mean-5*nat_stat_std_dev
# bad_buffer_thresh = bad_buffer_beta*bad_buffer_thresh + (1.-bad_buffer_beta)*step
#if bad_buffer_tracker >= bad_buffer_prob:
# step = nat_stat_mean+5*nat_stat_std_dev
# bad_buffer_thresh = bad_buffer_beta*bad_buffer_thresh + (1.-bad_buffer_beta)*step
#message = "NiN-44, Iter: {}, MSE_in: {}, MSE: {}, Wass G: {}, Wass D: {}, Val: {}".format(
# counter, 3.5/2*mse_in, 3.5/2*mse, wass_loss, wass_d_loss,
# 1 if not val_counter % val_skip_n else 0)
#print(message)
#try:
# log.write(message)
#except:
# print("Write to log failed")
#Save the model
#saver.save(sess, save_path=model_dir+"model/model", global_step=counter)
save_counter = counter
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-dir',
type=str,
default=data_dir,
help='The directory where the CIFAR-10 input data is stored.')
parser.add_argument(
'--job-dir',
type=str,
default=model_dir,
help='The directory where the model will be stored.')
parser.add_argument(
'--variable-strategy',
choices=['CPU', 'GPU'],
type=str,
default='GPU',
help='Where to locate variable operations')
parser.add_argument(
'--num-gpus',
type=int,
default=num_gpus,
help='The number of gpus used. Uses only CPU if set to 0.')
parser.add_argument(
'--log-device-placement',
action='store_true',
default=True,
help='Whether to log device placement.')
parser.add_argument(
'--num-intra-threads',
type=int,
default=0,
help="""\
Number of threads to use for intra-op parallelism. When training on CPU
set to 0 to have the system pick the appropriate number or alternatively
set it to the number of physical CPU cores.\
""")
parser.add_argument(
'--train-steps',
type=int,
default=80000,
help='The number of steps to use for training.')
parser.add_argument(
'--train-batch-size',
type=int,
default=batch_size,
help='Batch size for training.')
parser.add_argument(
'--eval-batch-size',
type=int,
default=batch_size,
help='Batch size for validation.')
parser.add_argument(
'--momentum',
type=float,
default=0.9,
help='Momentum for MomentumOptimizer.')
parser.add_argument(
'--learning-rate',
type=float,
default=0.1,
help="""\
        This is the initial learning rate value. The learning rate will decrease
during training. For more details check the model_fn implementation in
this file.\
""")
parser.add_argument(
'--sync',
action='store_true',
default=False,
help="""\
If present when running in a distributed environment will run on sync mode.\
""")
parser.add_argument(
'--num-inter-threads',
type=int,
default=0,
help="""\
Number of threads to use for inter-op parallelism. If set to 0, the
system will pick an appropriate number.\
""")
parser.add_argument(
'--data-format',
type=str,
default="NHWC",
help="""\
If not set, the data format best for the training device is used.
Allowed values: channels_first (NCHW) channels_last (NHWC).\
""")
parser.add_argument(
'--batch-norm-decay',
type=float,
default=0.997,
help='Decay for batch norm.')
parser.add_argument(
'--batch-norm-epsilon',
type=float,
default=1e-5,
help='Epsilon for batch norm.')
args = parser.parse_args()
if args.num_gpus > 0:
assert tf.test.is_gpu_available(), "Requested GPUs but none found."
if args.num_gpus < 0:
raise ValueError(
'Invalid GPU count: \"--num-gpus\" must be 0 or a positive integer.')
if args.num_gpus == 0 and args.variable_strategy == 'GPU':
        raise ValueError('num-gpus=0, CPU must be used as parameter server. Set '
'--variable-strategy=CPU.')
if args.num_gpus != 0 and args.train_batch_size % args.num_gpus != 0:
raise ValueError('--train-batch-size must be multiple of --num-gpus.')
if args.num_gpus != 0 and args.eval_batch_size % args.num_gpus != 0:
raise ValueError('--eval-batch-size must be multiple of --num-gpus.')
main(**vars(args))
| 40.715856 | 176 | 0.561883 |
794429cd3ed5c58ebc3e5963e67aa91409f372aa | 1,091 | py | Python | cgi-bin/log.py | msolivera/Phyton | 1322fa2ff4bb06a17350fefa7e5268c0969e5b53 | [
"bzip2-1.0.6"
] | null | null | null | cgi-bin/log.py | msolivera/Phyton | 1322fa2ff4bb06a17350fefa7e5268c0969e5b53 | [
"bzip2-1.0.6"
] | null | null | null | cgi-bin/log.py | msolivera/Phyton | 1322fa2ff4bb06a17350fefa7e5268c0969e5b53 | [
"bzip2-1.0.6"
] | null | null | null | import cgi, cgitb
cgitb.enable()
print ("Content-Type: text/html")
print ("")
params = cgi.FieldStorage()
logueado = False
mensaje_error = None
usuario = params.getvalue("usuario")
password = params.getvalue("contrasena")
if not usuario or not password:
mensaje_error = "Debes ingresar usuario y contraseña"
elif usuario == "yo" and password == "yo":
logueado = True
else:
mensaje_error = "Credenciales incorrectas"
# Open the HTML page
print ("""
<html>
<head>
<title>Formulario HTML</title>
</head>
<body>
""")
# If the credentials were correct, show a welcome message
if logueado:
print ("Bienvenido,", usuario)
# If not logged in, show the form
else:
 # If an error occurred, show the message before the form
if mensaje_error:
print (mensaje_error)
 # Show the login form
print ("""
<form method="post">
<label>Usuario</label>
<input type="text" name="usuario" />
<label>Contraseña</label>
<input type="password" name="contrasena" />
<input type="submit" value="Ingresar" />
</form>
""")
# Finish the HTML
print ("""
</body>
</html>
""")
| 23.212766 | 60 | 0.72319 |
79442a00dcab0f25d9c3f7cfbf7ba17aeb4a23a9 | 2,933 | py | Python | Agents/seeker.py | hanhha/ACTS | dca59ed487cd1581b54e510cc286bd3482d1cb0a | [
"MIT"
] | 1 | 2022-02-04T03:13:38.000Z | 2022-02-04T03:13:38.000Z | Agents/seeker.py | alimogh/ACTS | dca59ed487cd1581b54e510cc286bd3482d1cb0a | [
"MIT"
] | null | null | null | Agents/seeker.py | alimogh/ACTS | dca59ed487cd1581b54e510cc286bd3482d1cb0a | [
"MIT"
] | 1 | 2022-02-04T03:13:40.000Z | 2022-02-04T03:13:40.000Z | #!/usr/bin/env python3
from pandas import DataFrame
from . import misc_utils as misc
from . import ta
class BaseSeeker (misc.BPA):
def __init__ (self, source, params):
misc.BPA.__init__ (self, source = source, params = params)
self.archive = list ()
self.pdarchive = DataFrame ()
self.archive_len = 0
if len(params) > 0:
self.setParams (params)
self._investment = 0
self._qty_bought = 0
self.studies = dict ()
self.prediction = dict ()
self.last_calculations_ans = dict ()
def setParams (self, params):
misc.BPA.setParams (self, params)
self._goal = self._params ['goal']
self._fee = self._params ['fee']
def check_gainable (self, bprice, gain):
res = gain * (1 - self._fee) > bprice * self._fee * 2
return res
def check_profit_achieved (self, price):
gprice = price * (1 - self._fee)
return (gprice * self._qty_bought) > (self._investment)
def check_goal_achieved (self, price):
gprice = price * (1 - self._fee)
return (gprice * self._qty_bought) > (self._investment*(1 + self._goal))
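	# Fee accounting in the two checks above: the position is profitable once
	# the sale value after the sell fee exceeds the recorded investment
	# (buy price plus fee), and the goal is reached once it exceeds
	# investment * (1 + goal).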
def store (self, data):
self.archive.append (data)
self.pdarchive = self.pdarchive.append (data, ignore_index = True)
self.archive_len += 1
self.prediction = dict ()
def predict (self, factor, data):
if (self.archive_len > 0 and data ['T'] != self.archive [-1]['T']) or (self.archive_len == 0):
self.store (data)
return self.call_predict (factor, data) if factor not in self.prediction else self.prediction [factor]
def call_predict (self, factor, data):
if factor == 'hitgoal':
return self.check_goal_achieved (data['O'] if data['C'] > data['O'] else data['C'])
if factor == 'trend':
return self.predict_trend (data)
if factor == 'profitable':
return self.predict_profitable (data)
if factor == 'harvestable':
return self.predict_harvestable (data)
def predict_trend (self, data):
ptrend = True
self.prediction ['trend'] = ptrend
return ptrend
def predict_profitable (self, data):
ptb = True
self.prediction ['profitable'] = ptb
return ptb
def predict_harvestable (self, data):
hvb = True
self.prediction ['harvestable'] = hvb
return hvb
def CallBack (self, data):
if data[0] == 'buy':
self._investment = data[2]['price'] + data[2]['fee']
self._qty_bought = data[2]['qty']
def BkdrCallBack (self, data):
if (self.archive_len > 0 and data ['T'] != self.archive [-1]['T']) or (self.archive_len == 0):
self.store (data)
def printCandleStick (self, data):
good = data['C'] > data['O']
self.shout ("Candle stick at {t}".format(t = str(data['T'])))
self.shout ("O = {o} : C = {c} : H = {h} : L = {l}".format (h = data['H'], c = data['C'], o = data['O'], l = data['L']), good = good)
self.shout ("Bid = {b} : Ask = {a} : Last = {l}".format (b = data['Bid'], a = data['Ask'], l = data['Last']), good = good)
self.shout ("BV = {bv} : V = {v}".format (bv = data['BV'], v = data ['V']), good = good)
| 32.230769 | 135 | 0.643028 |
79442a1ac12589c85927f591b21034bc06b8e4ba | 606 | py | Python | make_page_list.py | mscroggs/HUSFAX | 7fa7d2fcc37b349b070f39c0b26208496fe74bc7 | [
"MIT"
] | null | null | null | make_page_list.py | mscroggs/HUSFAX | 7fa7d2fcc37b349b070f39c0b26208496fe74bc7 | [
"MIT"
] | null | null | null | make_page_list.py | mscroggs/HUSFAX | 7fa7d2fcc37b349b070f39c0b26208496fe74bc7 | [
"MIT"
] | 1 | 2020-12-26T15:46:28.000Z | 2020-12-26T15:46:28.000Z | #!/usr/bin/env python3
from ceefax import config
from ceefax.page import PageManager
from ceefax.cupt import DummyScreen
import os
import config as _c
config.ceefax_path = os.path.dirname(os.path.realpath(__file__))
config.pages_dir = os.path.join(config.ceefax_path, "pages")
config.NAME = "HUSFAX"
for i, j in _c.__dict__.items():
setattr(config, i, j)
page_manager = PageManager(DummyScreen())
with open("PAGES.md", "w") as f:
f.write("| No. | Page title |\n")
f.write("| --- | ---------- |\n")
for n, page in page_manager.sorted_pages():
f.write(f"| {n} | {page.title} |\n")
| 28.857143 | 64 | 0.669967 |
79442acccb3262d642f2be315586d33d6876ffcc | 10,989 | py | Python | pandas/tests/frame/methods/test_reset_index.py | timhunderwood/pandas | 0159cba6eb14983ab7eaf38ff138c3c397a6fe3b | [
"BSD-3-Clause"
] | 3 | 2020-07-02T12:59:52.000Z | 2020-08-27T20:05:44.000Z | pandas/tests/frame/methods/test_reset_index.py | timhunderwood/pandas | 0159cba6eb14983ab7eaf38ff138c3c397a6fe3b | [
"BSD-3-Clause"
] | 1 | 2016-10-23T21:07:28.000Z | 2016-10-23T21:07:28.000Z | pandas/tests/frame/methods/test_reset_index.py | timhunderwood/pandas | 0159cba6eb14983ab7eaf38ff138c3c397a6fe3b | [
"BSD-3-Clause"
] | 2 | 2020-12-11T05:33:38.000Z | 2022-01-16T12:42:17.000Z | from datetime import datetime
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestResetIndex:
def test_reset_index_tz(self, tz_aware_fixture):
# GH 3950
# reset_index with single level
tz = tz_aware_fixture
idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx")
df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx)
expected = DataFrame(
{
"idx": [
datetime(2011, 1, 1),
datetime(2011, 1, 2),
datetime(2011, 1, 3),
datetime(2011, 1, 4),
datetime(2011, 1, 5),
],
"a": range(5),
"b": ["A", "B", "C", "D", "E"],
},
columns=["idx", "a", "b"],
)
expected["idx"] = expected["idx"].apply(lambda d: Timestamp(d, tz=tz))
tm.assert_frame_equal(df.reset_index(), expected)
def test_reset_index_with_intervals(self):
idx = IntervalIndex.from_breaks(np.arange(11), name="x")
original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]]
result = original.set_index("x")
expected = DataFrame({"y": np.arange(10)}, index=idx)
tm.assert_frame_equal(result, expected)
result2 = result.reset_index()
tm.assert_frame_equal(result2, original)
def test_reset_index(self, float_frame):
stacked = float_frame.stack()[::2]
stacked = DataFrame({"foo": stacked, "bar": stacked})
names = ["first", "second"]
stacked.index.names = names
deleveled = stacked.reset_index()
for i, (lev, level_codes) in enumerate(
zip(stacked.index.levels, stacked.index.codes)
):
values = lev.take(level_codes)
name = names[i]
tm.assert_index_equal(values, Index(deleveled[name]))
stacked.index.names = [None, None]
deleveled2 = stacked.reset_index()
tm.assert_series_equal(
deleveled["first"], deleveled2["level_0"], check_names=False
)
tm.assert_series_equal(
deleveled["second"], deleveled2["level_1"], check_names=False
)
# default name assigned
rdf = float_frame.reset_index()
exp = Series(float_frame.index.values, name="index")
tm.assert_series_equal(rdf["index"], exp)
# default name assigned, corner case
df = float_frame.copy()
df["index"] = "foo"
rdf = df.reset_index()
exp = Series(float_frame.index.values, name="level_0")
tm.assert_series_equal(rdf["level_0"], exp)
# but this is ok
float_frame.index.name = "index"
deleveled = float_frame.reset_index()
tm.assert_series_equal(deleveled["index"], Series(float_frame.index))
tm.assert_index_equal(deleveled.index, Index(np.arange(len(deleveled))))
# preserve column names
float_frame.columns.name = "columns"
resetted = float_frame.reset_index()
assert resetted.columns.name == "columns"
# only remove certain columns
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index(["A", "B"])
# TODO should reset_index check_names ?
tm.assert_frame_equal(rs, float_frame, check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index(["index", "A", "B"])
tm.assert_frame_equal(rs, float_frame.reset_index(), check_names=False)
rs = df.reset_index("A")
xp = float_frame.reset_index().set_index(["index", "B"])
tm.assert_frame_equal(rs, xp, check_names=False)
# test resetting in place
df = float_frame.copy()
resetted = float_frame.reset_index()
df.reset_index(inplace=True)
tm.assert_frame_equal(df, resetted, check_names=False)
df = float_frame.reset_index().set_index(["index", "A", "B"])
rs = df.reset_index("A", drop=True)
xp = float_frame.copy()
del xp["A"]
xp = xp.set_index(["B"], append=True)
tm.assert_frame_equal(rs, xp, check_names=False)
def test_reset_index_name(self):
df = DataFrame(
[[1, 2, 3, 4], [5, 6, 7, 8]],
columns=["A", "B", "C", "D"],
index=Index(range(2), name="x"),
)
assert df.reset_index().index.name is None
assert df.reset_index(drop=True).index.name is None
df.reset_index(inplace=True)
assert df.index.name is None
def test_reset_index_level(self):
df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"])
for levels in ["A", "B"], [0, 1]:
# With MultiIndex
result = df.set_index(["A", "B"]).reset_index(level=levels[0])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels[:1])
tm.assert_frame_equal(result, df.set_index("B"))
result = df.set_index(["A", "B"]).reset_index(level=levels)
tm.assert_frame_equal(result, df)
result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True)
tm.assert_frame_equal(result, df[["C", "D"]])
# With single-level Index (GH 16263)
result = df.set_index("A").reset_index(level=levels[0])
tm.assert_frame_equal(result, df)
result = df.set_index("A").reset_index(level=levels[:1])
tm.assert_frame_equal(result, df)
result = df.set_index(["A"]).reset_index(level=levels[0], drop=True)
tm.assert_frame_equal(result, df[["B", "C", "D"]])
# Missing levels - for both MultiIndex and single-level Index:
for idx_lev in ["A", "B"], ["A"]:
with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"):
df.set_index(idx_lev).reset_index(level=["A", "E"])
with pytest.raises(IndexError, match="Too many levels"):
df.set_index(idx_lev).reset_index(level=[0, 1, 2])
def test_reset_index_right_dtype(self):
time = np.arange(0.0, 10, np.sqrt(2) / 2)
s1 = Series(
(9.81 * time ** 2) / 2, index=Index(time, name="time"), name="speed"
)
df = DataFrame(s1)
resetted = s1.reset_index()
assert resetted["time"].dtype == np.float64
resetted = df.reset_index()
assert resetted["time"].dtype == np.float64
def test_reset_index_multiindex_col(self):
vals = np.random.randn(3, 3).astype(object)
idx = ["x", "y", "z"]
full = np.hstack(([[x] for x in idx], vals))
df = DataFrame(
vals,
Index(idx, name="a"),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index()
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_fill=None)
xp = DataFrame(
full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index(col_level=1, col_fill="blah")
xp = DataFrame(
full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]]
)
tm.assert_frame_equal(rs, xp)
df = DataFrame(
vals,
MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]),
columns=[["b", "b", "c"], ["mean", "median", "mean"]],
)
rs = df.reset_index("a")
xp = DataFrame(
full,
Index([0, 1, 2], name="d"),
columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill=None)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
rs = df.reset_index("a", col_fill="blah", col_level=1)
xp = DataFrame(
full,
Index(range(3), name="d"),
columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]],
)
tm.assert_frame_equal(rs, xp)
def test_reset_index_multiindex_nan(self):
# GH#6322, testing reset_index on MultiIndexes
# when we have a nan or all nan
df = DataFrame(
{"A": ["a", "b", "c"], "B": [0, 1, np.nan], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{"A": [np.nan, "b", "c"], "B": [0, 1, 2], "C": np.random.rand(3)}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]})
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
df = DataFrame(
{
"A": ["a", "b", "c"],
"B": [np.nan, np.nan, np.nan],
"C": np.random.rand(3),
}
)
rs = df.set_index(["A", "B"]).reset_index()
tm.assert_frame_equal(rs, df)
def test_reset_index_with_datetimeindex_cols(self):
# GH#5818
df = DataFrame(
[[1, 2], [3, 4]],
columns=date_range("1/1/2013", "1/2/2013"),
index=["A", "B"],
)
result = df.reset_index()
expected = DataFrame(
[["A", 1, 2], ["B", 3, 4]],
columns=["index", datetime(2013, 1, 1), datetime(2013, 1, 2)],
)
tm.assert_frame_equal(result, expected)
def test_reset_index_range(self):
# GH#12071
df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2))
result = df.reset_index()
assert isinstance(result.index, RangeIndex)
expected = DataFrame(
[[0, 0, 0], [1, 1, 1]],
columns=["index", "A", "B"],
index=RangeIndex(stop=2),
)
tm.assert_frame_equal(result, expected)
def test_reset_index_dtypes_on_empty_frame_with_multiindex():
# GH 19602 - Preserve dtype on empty DataFrame with MultiIndex
idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], ["a", "b"]])
result = DataFrame(index=idx)[:0].reset_index().dtypes
expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": object})
tm.assert_series_equal(result, expected)
| 35.678571 | 87 | 0.533352 |
79442b3a3a9baf32a304392d1cd18a75c290a707 | 19,695 | py | Python | utime/dataset/sleep_study/sleep_study.py | learning310/U-Time | f7c8e3f1368f43226872a69b0fbb8c29990e4bd9 | [
"MIT"
] | 138 | 2019-11-20T02:31:17.000Z | 2022-03-23T04:31:51.000Z | utime/dataset/sleep_study/sleep_study.py | acrarshin/U-Time | f7c8e3f1368f43226872a69b0fbb8c29990e4bd9 | [
"MIT"
] | 46 | 2019-12-04T03:13:28.000Z | 2022-03-31T13:10:48.000Z | utime/dataset/sleep_study/sleep_study.py | acrarshin/U-Time | f7c8e3f1368f43226872a69b0fbb8c29990e4bd9 | [
"MIT"
] | 42 | 2019-11-26T16:02:26.000Z | 2022-01-06T11:01:32.000Z | """
Implements the SleepStudy class which represents a sleep study (PSG)
"""
import numpy as np
from utime import errors
from utime.io.high_level_file_loaders import load_psg, load_hypnogram
from utime.preprocessing import (apply_scaling, strip_funcs, apply_strip_func,
assert_scaler, set_psg_sample_rate,
quality_control_funcs, assert_equal_length,
apply_quality_control_func)
from utime.hypnogram.utils import create_class_int_to_period_idx_dict
from utime.dataset.sleep_study.subject_dir_sleep_study_base import SubjectDirSleepStudyBase
def assert_header_fields(header):
""" Check for minimally required fields of a header dict """
check = (('sample_rate', False), ('channel_names', False), ('date', True))
for c, replace_with_none in check:
if c not in header:
if replace_with_none:
header[c] = None
else:
raise ValueError("Invalid header file loaded, did not find "
"attribute {} in header {}".format(c, header))
class SleepStudy(SubjectDirSleepStudyBase):
"""
Represents a PSG sleep study and (optionally) a manually scored hypnogram
"""
def __init__(self,
subject_dir,
psg_regex=None,
hyp_regex=None,
header_regex=None,
period_length_sec=None,
no_hypnogram=None,
annotation_dict=None,
load=False,
logger=None):
"""
Initialize a SleepStudy object from PSG/HYP data
PSG: A file that stores polysomnography (PSG) data
HYP: A file that stores the sleep stages / annotations for the PSG
Takes a path pointing to a directory in which two or more files are
located. One of those files should be a PSG (data) file and unless
no_hypnogram == True another should be a hypnogram/sleep stages/labels
        file. The PSG(/HYP) files are automatically inferred
using a set of simple rules when psg_regex or hyp_regex are None
(refer to the 'utime.dataset.utils.find_psg_and_hyp' function).
Otherwise, the psg_regex and/or hyp_regex is used to match against
folder content. Each regex should have exactly one match within
'subject_dir'.
Args:
subject_dir: (str) File path to a directory storing the
subject data.
psg_regex: (str) Optional regex used to select PSG file
hyp_regex: (str) Optional regex used to select HYP file
header_regex: (str) Optional regex used to select a header file
OBS: Rarely used as most formats store headers internally, or
have header paths which are inferrable from the psg_path.
period_length_sec (int) Sleep 'epoch' (segment/period) length in
seconds
no_hypnogram (bool) Initialize without ground truth data.
annotation_dict (dict) A dictionary mapping from labels in the
hyp_file_path file to integers
load (bool) Load the PSG object at init time.
logger (Logger) A Logger object
"""
super(SleepStudy, self).__init__(
subject_dir=subject_dir,
psg_regex=psg_regex,
hyp_regex=hyp_regex,
header_regex=header_regex,
period_length_sec=period_length_sec,
no_hypnogram=no_hypnogram,
annotation_dict=annotation_dict,
logger=logger
)
# Hidden attributes controlled in property functions to limit setting
# of these values to the load() function
self._scaler = None
self._scaler_obj = None
self._load_time_random_channel_selector = None
self._strip_func = None
self._quality_control_func = None
self._class_to_period_dict = None
self._sample_rate = None
self._date = None
self._org_sample_rate = None
# Define attributes that will be dumped on self.unload calls
self._none_on_unload = (
'_psg', '_date', '_org_sample_rate',
'_hypnogram', '_scaler_obj', '_class_to_period_dict'
)
# Temp fix to stop QA warnings on each load in a Queue object
self.times_loaded = 0
if load:
self.load()
def __str__(self):
if self.loaded:
t = (self.identifier, len(self.select_channels), self.date,
self.sample_rate, self.hypnogram is not False)
return "SleepStudy(loaded=True, identifier={:s}, N channels: " \
"{}, date: {}, sample_rate={:.1f}, hypnogram={})".format(*t)
else:
return repr(self)
def __repr__(self):
return "SleepStudy(loaded={}, identifier={})".format(self.loaded,
self.identifier)
@property
def class_to_period_dict(self):
"""
Returns the class_to_period_dict, which maps a class integer
(such as 0) to an array of period indices that correspond to PSG
        periods/epochs/segments that the ground truth scores as that
class (such as 0).
"""
return self._class_to_period_dict
@property
def load_time_random_channel_selector(self):
"""
        Returns the RandomChannelSelector object (or None) used to randomly
        pick which channels to load at load-time.
"""
return self._load_time_random_channel_selector
@load_time_random_channel_selector.setter
def load_time_random_channel_selector(self, channel_selector):
"""
        Sets a RandomChannelSelector used to randomly pick channels at
        load-time. Cannot be combined with a set 'select_channels' attribute.
        Args:
            channel_selector: A utime.io.channels.RandomChannelSelector
                              instance, or None.
"""
if channel_selector and self.select_channels:
raise RuntimeError("Setting the 'load_time_random_channel_selector' "
"attribute is not possible with set values in "
"'select_channels'")
from utime.io.channels import RandomChannelSelector
if channel_selector is not None and not \
isinstance(channel_selector, RandomChannelSelector):
raise TypeError("Expected 'channel_selector' argument to be of "
"type {}, got {}".format(type(RandomChannelSelector),
type(channel_selector)))
self._load_time_random_channel_selector = channel_selector
@property
def sample_rate(self):
""" Returns the currently set sample rate """
return self._sample_rate
@sample_rate.setter
def sample_rate(self, sample_rate):
"""
Set a new sample rate
Is considered at load-time
Setting with self.loaded == True forces a reload.
"""
sample_rate = int(sample_rate)
if sample_rate <= 0:
raise ValueError("Sample rate must be a positive integer, "
"got {}".format(sample_rate))
self._sample_rate = sample_rate
if self.loaded:
self.reload(warning=True)
@property
def org_sample_rate(self):
"""
Returns the original sample rate
"""
return self._org_sample_rate
@property
def date(self):
""" Returns the recording date, may be None """
return self._date
@property
def scaler(self):
""" Returns the scaler type (string), see setter method """
return self._scaler
@scaler.setter
def scaler(self, scaler):
"""
Sets a scaler type.
Is considered at load-time
Setting with self.loaded == True forces a reload.
Args:
scaler: String, naming a sklearn.preprocessing scaler.
"""
if not assert_scaler(scaler):
raise ValueError("Invalid scaler, does not exist {}".format(scaler))
self._scaler = scaler
if self.loaded:
self.reload(warning=True)
@property
def scaler_obj(self):
""" Reference to the scaler object """
return self._scaler_obj
@property
def strip_func(self):
"""
See setter method
strip_func - when set - is a 2-tuple (strip_func_name, kwargs)
"""
return self._strip_func
def set_strip_func(self, strip_func_str, **kwargs):
"""
Sets a strip function. Strip functions are applied to the PSG/HYP pair
at load time and may deal with minor differences between the length
of the PSG and HYP (e.g. if the PSG is longer than the HYP file).
See utime.preprocessing.strip_funcs
Forces a reload if self.loaded is True
Args:
strip_func_str: A string naming a strip_func in:
utime.preprocessing.strip_funcs
kwargs: Other kw arguments that will be passed to the strip
function.
"""
if strip_func_str not in strip_funcs.__dict__:
self.raise_err(ValueError, "Invalid strip function "
"{}".format(strip_func_str))
self._strip_func = (strip_func_str, kwargs)
if self.loaded:
self.reload(warning=True)
@property
def quality_control_func(self):
""" See setter method """
return self._quality_control_func
def set_quality_control_func(self, quality_control_func, **kwargs):
"""
Sets a quality control function which is applied to all segments
of a PSG (as determined by self.period_length_sec) and may alter the
values of said segments.
Applies at load-time, forces a reload if self.loaded == True.
Args:
            quality_control_func: A string naming a quality control function in:
utime.preprocessing.quality_control_funcs
**kwargs: Parameters passed to the quality control func at load
"""
if quality_control_func not in quality_control_funcs.__dict__:
self.raise_err(ValueError, "Invalid quality control function "
"{}".format(quality_control_func))
self._quality_control_func = (quality_control_func, kwargs)
if self.loaded:
self.reload(warning=True)
@property
def loaded(self):
"""
Returns whether the SleepStudy data is currently loaded or not
"""
return not any((self.psg is None,
self.hypnogram is None))
def _load_with_any_in(self, channel_sets):
"""
Normally not called directly, usually called from self._load.
Circulates a list of lists of proposed channel names to load,
attempting to load using any of the sets (in specified order), raising
        ChannelNotFoundError if none of the sets could be loaded.
Args:
channel_sets: List of lists of strings, each naming a channel to
load.
Returns:
If one of the sets of channels could be loaded, returns the
            PSG array of shape [-1, n_channels] and the corresponding header.
"""
for i, channel_set in enumerate(channel_sets):
try:
if self.load_time_random_channel_selector:
# On reloads, the set_load_channel group will have been set
# if using a load_time_channel_selector, remove it here.
channel_set = None
temp = self.load_time_random_channel_selector
psg, header = load_psg(psg_file_path=self.psg_file_path,
load_channels=channel_set or None,
load_time_channel_selector=temp,
header_file_path=self.header_file_path)
return psg, header
except errors.ChannelNotFoundError as e:
if i < len(channel_sets) - 1:
                    # Try next set of channels
continue
else:
s, sa = self.select_channels, \
self.alternative_select_channels
err = errors.ChannelNotFoundError("Could not load "
"select_channels {} or "
"alternative_select_"
"channels "
"{}".format(s, sa))
raise err from e
def _load(self):
"""
Loads data from the PSG and HYP files
-- If self.select_channels is set (aka non empty list), only the column
names matching this list will be kept.
-- PSG data is kept as a numpy array. Use self.select_channels to map
between names and the numpy array
-- If self.scaler is set, the PSG array will be scaled according to
the specified sklearn.preprocessing scaler
-- If self.hyp_strip_func is set, this function will be applied to the
hypnogram object.
"""
self._psg, header = self._load_with_any_in(self._try_channels)
self._set_loaded_channels(header['channel_names'])
self._set_header_fields(header)
if self.hyp_file_path is not None and not self.no_hypnogram:
self._hypnogram, \
self.annotation_dict = load_hypnogram(self.hyp_file_path,
period_length_sec=self.period_length_sec,
annotation_dict=self.annotation_dict,
sample_rate=header["sample_rate"])
else:
self._hypnogram = False
if self.strip_func:
# Strip the data using the passed function on the passed class
self._psg, self._hypnogram = apply_strip_func(self,
self.org_sample_rate)
elif self.hypnogram and not assert_equal_length(self.psg,
self.hypnogram,
self.org_sample_rate):
self.raise_err(RuntimeError, "PSG and hypnogram are not equally "
"long in seconds. Consider setting a "
"strip_function. "
"See utime.preprocessing.strip_funcs.")
if self.quality_control_func:
# Run over epochs and assess if epoch-specific changes should be
            # made to limit the influence of very high noise level epochs etc.
self._psg = apply_quality_control_func(self,
self.org_sample_rate,
not bool(self.times_loaded))
# Set different sample rate of PSG?
if self.org_sample_rate != self.sample_rate:
self._psg = set_psg_sample_rate(self._psg,
new_sample_rate=self.sample_rate,
old_sample_rate=self.org_sample_rate)
if self.scaler:
self._psg, self._scaler_obj = apply_scaling(self.psg, self.scaler)
# Store dictionary mapping class integers to period idx of that class
if self.hypnogram:
self._class_to_period_dict = create_class_int_to_period_idx_dict(
self.hypnogram
)
# Ensure converted to float32 ndarray
self._psg = self._psg.astype(np.float32)
self.times_loaded += 1
def _set_header_fields(self, header):
"""
        Stores select header fields (date, original sample rate) as
        attributes on this object.
        Args:
            header: dict, a PSG header with at least the 'date' and
                    'sample_rate' fields.
"""
# Ensure all header information is available
assert_header_fields(header)
self._date = header["date"]
self._org_sample_rate = header["sample_rate"]
self._sample_rate = self._sample_rate or self._org_sample_rate
def load(self, reload=False):
"""
High-level function invoked to load the SleepStudy data
"""
if reload or not self.loaded:
try:
self._load()
except Exception as e:
raise errors.CouldNotLoadError("Unexpected load error for sleep "
"study {}. Please refer to the "
"above traceback.".format(self.identifier),
study_id=self.identifier) from e
return self
def unload(self):
""" Unloads the PSG, header and hypnogram data """
for attr in self._none_on_unload:
setattr(self, attr, None)
def reload(self, warning=True):
""" Unloads and loads """
if warning and self.loaded:
print("Reloading SleepStudy '{}'".format(self.identifier))
self.load(reload=True)
def get_psg_shape(self):
"""
        Returns the shape of the loaded PSG array, i.e. [n_samples, n_channels].
"""
return self.psg.shape
def get_class_counts(self, as_dict=False):
"""
Computes the class counts for the loaded hypnogram.
Args:
as_dict: (bool) return a dictionary mapping from class labels
(ints) to the count (int) for that class instead of
the typical array of class counts.
Returns:
An ndarray of length n_classes of counts if as_dict == False
Otherwise a dictionary mapping class labels to counts.
"""
classes = sorted(self._class_to_period_dict.keys())
counts = np.array([len(self._class_to_period_dict[c]) for c in classes])
if as_dict:
return {cls: count for cls, count in zip(classes, counts)}
else:
return counts
def get_class_indicies(self, class_int):
return self.class_to_period_dict[class_int]
def get_full_psg(self):
"""
        Returns the full, loaded PSG array of shape [n_samples, n_channels].
"""
return self.psg
def extract_from_psg(self, start, end, channel_inds=None):
"""
Extract PSG data from second 'start' (inclusive) to second 'end'
(exclusive)
Args:
start: int, start second to extract from
end: int, end second to extract from
channel_inds: list, list of channel indices to extract from
Returns:
A Pandas DataFrame view or numpy view
"""
if start > self.last_period_start_second:
raise ValueError("Cannot extract a full period starting from second"
" {}. Last full period of {} seconds starts at "
"second {}.".format(start, self.period_length_sec,
self.last_period_start_second))
sr = self.sample_rate
first_row = int(start * sr)
last_row = int(end * sr)
rows = self.psg[first_row:last_row]
if channel_inds is not None:
return rows[:, channel_inds]
else:
return rows
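if __name__ == "__main__":
    # Usage sketch added for illustration only -- not part of the original
    # module. The subject directory is a hypothetical placeholder; the scaler
    # may be any sklearn.preprocessing scaler name accepted by assert_scaler.
    example_study = SleepStudy(subject_dir="/data/psg/subject_001",  # hypothetical path
                               period_length_sec=30,
                               no_hypnogram=False,
                               load=False)
    example_study.scaler = "RobustScaler"   # scale each channel at load time
    example_study.sample_rate = 128         # resample the PSG at load time
    example_study.load()
    print(example_study.get_psg_shape())
    print(example_study.get_class_counts(as_dict=True))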
| 39.707661 | 100 | 0.566743 |
79442c2b8b7838ccc421e22b577cfedf186be0dc | 643 | py | Python | rstem/mcpi/examples/zhuowei_rainbow.py | readysetstem/readysetstem-api | 01e1360f4a28a6783ee1e0fa1bc239dd999de6be | [
"Apache-2.0"
] | 1 | 2018-02-23T20:20:45.000Z | 2018-02-23T20:20:45.000Z | rstem/mcpi/examples/zhuowei_rainbow.py | readysetstem/readysetstem-api | 01e1360f4a28a6783ee1e0fa1bc239dd999de6be | [
"Apache-2.0"
] | 1 | 2016-10-25T18:00:15.000Z | 2016-10-25T18:00:15.000Z | rstem/mcpi/examples/zhuowei_rainbow.py | readysetstem/readysetstem-api | 01e1360f4a28a6783ee1e0fa1bc239dd999de6be | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# mcpipy.com retrieved from URL below, written by zhuowei
# http://www.minecraftforum.net/topic/1638036-my-first-script-for-minecraft-pi-api-a-rainbow/
from .. import minecraft
from .. import block
from math import *
import server
colors = [14, 1, 4, 5, 3, 11, 10]
mc = minecraft.Minecraft.create(server.address)
height = 60
mc.setBlocks(-64,0,0,64,height + len(colors),0,0)
for x in range(0, 128):
for colourindex in range(0, len(colors)):
y = sin((x / 128.0) * pi) * height + colourindex
mc.setBlock(x - 64, int(y), 0, block.WOOL.id, colors[len(colors) - 1 - colourindex]) | 30.619048 | 100 | 0.656299 |
79442c6a0e25142d99c60164f8a08e72cc426d05 | 6,863 | py | Python | src/relstorage/adapters/poller.py | lungj/relstorage | e18394b0197f6b70708037f36defbd3fe3ee5137 | [
"ZPL-2.1"
] | null | null | null | src/relstorage/adapters/poller.py | lungj/relstorage | e18394b0197f6b70708037f36defbd3fe3ee5137 | [
"ZPL-2.1"
] | null | null | null | src/relstorage/adapters/poller.py | lungj/relstorage | e18394b0197f6b70708037f36defbd3fe3ee5137 | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import absolute_import, print_function
import logging
from zope.interface import implementer
from .interfaces import IPoller
from .interfaces import StaleConnectionError
from .schema import Schema
from .sql import func
log = logging.getLogger(__name__)
@implementer(IPoller)
class Poller(object):
"""Database change notification poller"""
# The zoid is the primary key on both ``current_object`` (history
# preserving) and ``object_state`` (history free), so these
# queries are guaranteed to only produce an OID once.
_list_changes_range_query = Schema.all_current_object.select(
Schema.all_current_object.c.zoid, Schema.all_current_object.c.tid
).where(
Schema.all_current_object.c.tid > Schema.all_current_object.bindparam('min_tid')
).and_(
Schema.all_current_object.c.tid <= Schema.all_current_object.bindparam('max_tid')
).prepared()
_poll_inv_query = Schema.all_current_object.select(
Schema.all_current_object.c.zoid, Schema.all_current_object.c.tid
).where(
Schema.all_current_object.c.tid > Schema.all_current_object.bindparam('tid')
).prepared()
_poll_inv_exc_query = _poll_inv_query.and_(
Schema.all_current_object.c.tid != Schema.all_current_object.bindparam('self_tid')
).prepared()
poll_query = Schema.all_transaction.select(
func.max(Schema.all_transaction.c.tid)
).prepared()
def __init__(self, driver, keep_history, runner, revert_when_stale):
self.driver = driver
self.keep_history = keep_history
self.runner = runner
self.revert_when_stale = revert_when_stale
def poll_invalidations(self, conn, cursor, prev_polled_tid, ignore_tid):
"""
Polls for new transactions.
*conn* and *cursor* must have been created previously by
``open_for_load()`` (a snapshot connection). prev_polled_tid
is the tid returned at the last poll, or None if this is the
first poll. If ignore_tid is not None, changes committed in
that transaction will not be included in the list of changed
OIDs.
Returns ``(changes, new_polled_tid)``, where *changes* is
either a list of ``(oid_int, tid_int)`` that have changed, or
        ``None`` to indicate that the changes are too complex to list
        --- this must cause local storage caches to be invalidated.
*new_polled_tid* can be 0 if there is no data in the database.
"""
# pylint:disable=unused-argument
# find out the tid of the most recent transaction.
self.poll_query.execute(cursor)
rows = cursor.fetchall()
if not rows or not rows[0][0]:
# No data, must be fresh database, without even
# the root object.
# Anything we had cached is now definitely invalid.
return None, 0
new_polled_tid = rows[0][0]
if prev_polled_tid is None:
# This is the first time the connection has polled.
# We'd have to list the entire database for the changes,
# which is clearly no good. So we have no information
# about the state of anything we have cached.
return None, new_polled_tid
if new_polled_tid == prev_polled_tid:
# No transactions have been committed since prev_polled_tid.
return (), new_polled_tid
if new_polled_tid < prev_polled_tid:
# The database connection is stale. This can happen after
# reading an asynchronous slave that is not fully up to date.
# (It may also suggest that transaction IDs are not being created
# in order, which would be a serious bug leading to consistency
# violations.)
if self.revert_when_stale:
# This client prefers to revert to the old state.
log.warning(
"Reverting to stale transaction ID %d and clearing cache. "
"(prev_polled_tid=%d)",
new_polled_tid, prev_polled_tid)
# We have to invalidate the whole cPickleCache, otherwise
# the cache would be inconsistent with the reverted state.
return None, new_polled_tid
# This client never wants to revert to stale data, so
# raise ReadConflictError to trigger a retry.
# We're probably just waiting for async replication
# to catch up, so retrying could do the trick.
raise StaleConnectionError.from_prev_and_new_tid(
prev_polled_tid, new_polled_tid)
# New transaction(s) have been added.
# In the past, but only for history-preserving databases, we
# would check to see if the previously polled transaction no
# longer exists in the transaction table. If it didn't, we
# would return ``(None, new_polled_tid)``, in order to clear
# the Connection cache.
#
        # However, we ran for years without an analogous case for
# history-free databases without problems, on the theory that
# all the unreachable objects will be garbage collected
# anyway.
#
# Thus we became convinced it was safe to remove the check in
# history-preserving databases.
# Get the list of changed OIDs and return it.
stmt = self._poll_inv_query
params = {'tid': prev_polled_tid}
if ignore_tid is not None:
stmt = self._poll_inv_exc_query
params['self_tid'] = ignore_tid
stmt.execute(cursor, params)
# See list_changes: This could be a large result set.
changes = cursor
return changes, new_polled_tid
def list_changes(self, cursor, after_tid, last_tid):
"""
See ``IPoller``.
"""
params = {'min_tid': after_tid, 'max_tid': last_tid}
self._list_changes_range_query.execute(cursor, params)
# Return the cursor: let it be its own iterable. This could be a
# very large result set. For things that matter, like gevent,
# consume in batches allowing periodic switches.
return cursor
| 42.104294 | 90 | 0.64549 |
79442e3bf682b78dcf039fdc9f2801e603921ff8 | 872 | py | Python | alipay/aop/api/response/AlipayOpenDesCreateResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayOpenDesCreateResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/response/AlipayOpenDesCreateResponse.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.GavintestNewLeveaOne import GavintestNewLeveaOne
class AlipayOpenDesCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayOpenDesCreateResponse, self).__init__()
self._ces = None
@property
def ces(self):
return self._ces
@ces.setter
def ces(self, value):
if isinstance(value, GavintestNewLeveaOne):
self._ces = value
else:
self._ces = GavintestNewLeveaOne.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(AlipayOpenDesCreateResponse, self).parse_response_content(response_content)
if 'ces' in response:
self.ces = response['ces']
| 29.066667 | 100 | 0.698394 |
79442e775625cd0694de4720fd5de846738b1c9d | 3,485 | py | Python | src/pretix/plugins/banktransfer/urls.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 1,248 | 2015-04-24T13:32:06.000Z | 2022-03-29T07:01:36.000Z | src/pretix/plugins/banktransfer/urls.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 2,113 | 2015-02-18T18:58:16.000Z | 2022-03-31T11:12:32.000Z | src/pretix/plugins/banktransfer/urls.py | fabm3n/pretix | 520fb620888d5c434665a6a4a33cb2ab22dd42c7 | [
"Apache-2.0"
] | 453 | 2015-05-13T09:29:06.000Z | 2022-03-24T13:39:16.000Z | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.conf.urls import re_path
from pretix.api.urls import orga_router
from pretix.plugins.banktransfer.api import BankImportJobViewSet
from . import views
urlpatterns = [
re_path(r'^control/organizer/(?P<organizer>[^/]+)/banktransfer/import/',
views.OrganizerImportView.as_view(),
name='import'),
re_path(r'^control/organizer/(?P<organizer>[^/]+)/banktransfer/job/(?P<job>\d+)/',
views.OrganizerJobDetailView.as_view(), name='import.job'),
re_path(r'^control/organizer/(?P<organizer>[^/]+)/banktransfer/action/',
views.OrganizerActionView.as_view(), name='import.action'),
re_path(r'^control/organizer/(?P<organizer>[^/]+)/banktransfer/refunds/',
views.OrganizerRefundExportListView.as_view(), name='refunds.list'),
re_path(r'^control/organizer/(?P<organizer>[^/]+)/banktransfer/export/(?P<id>\d+)/$',
views.OrganizerDownloadRefundExportView.as_view(),
name='refunds.download'),
re_path(r'^control/organizer/(?P<organizer>[^/]+)/banktransfer/sepa-export/(?P<id>\d+)/$',
views.OrganizerSepaXMLExportView.as_view(),
name='refunds.sepa'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/banktransfer/import/',
views.EventImportView.as_view(),
name='import'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/banktransfer/job/(?P<job>\d+)/',
views.EventJobDetailView.as_view(), name='import.job'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/banktransfer/action/',
views.EventActionView.as_view(), name='import.action'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/banktransfer/refunds/',
views.EventRefundExportListView.as_view(),
name='refunds.list'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/banktransfer/export/(?P<id>\d+)/$',
views.EventDownloadRefundExportView.as_view(),
name='refunds.download'),
re_path(r'^control/event/(?P<organizer>[^/]+)/(?P<event>[^/]+)/banktransfer/sepa-export/(?P<id>\d+)/$',
views.EventSepaXMLExportView.as_view(),
name='refunds.sepa'),
]
orga_router.register('bankimportjobs', BankImportJobViewSet)
| 53.615385 | 118 | 0.682066 |
79442ebf15a1730ce0af5d8dbbc9186d27a69c8e | 1,670 | py | Python | AtC_Reg_Con_011-020/ARC018/A.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | AtC_Reg_Con_011-020/ARC018/A.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | AtC_Reg_Con_011-020/ARC018/A.py | yosho-18/AtCoder | 50f6d5c92a01792552c31ac912ce1cd557b06fb0 | [
"MIT"
] | null | null | null | import sys
# import math, string, itertools, fractions, heapq, collections, re, array, bisect, copy, functools, random
# from collections import deque, defaultdict, Counter; from heapq import heappush, heappop
# from itertools import permutations, combinations, product, accumulate, groupby
# from bisect import bisect_left, bisect_right, insort_left, insort_right
# from operator import itemgetter as ig
sys.setrecursionlimit(10 ** 7)
# inf = 10 ** 20; INF = float("INF"); ans = 0; tmp = 0; ansli = []; tmpli = []; candili = []; mod = 10 ** 9 + 7
# dd = [(-1, 0), (0, 1), (1, 0), (0, -1)]; ddn = dd + [(-1, 1), (1, 1), (1, -1), (-1, -1)]; ddn9 = ddn + [(0, 0)]
"""for dx, dy in dd:
nx = j + dx; ny = i + dy
if 0 <= nx < w and 0 <= ny < h:"""
def wi(): return list(map(int, sys.stdin.readline().split()))
def wip(): return [int(x) - 1 for x in sys.stdin.readline().split()]#WideIntPoint
def ws(): return sys.stdin.readline().split()
def i(): return int(sys.stdin.readline())
def s(): return input()
def hi(n): return [i() for _ in range(n)]
def hs(n): return [s() for _ in range(n)]#HeightString
def mi(n): return [wi() for _ in range(n)]#MatrixInt
def mip(n): return [wip() for _ in range(n)]
def ms(n): return [ws() for _ in range(n)]
a = i()
b = i()
c = i()
if max(a, b, c) == a:
print(1)
if b > c:
print(2)
print(3)
else:
print(3)
print(2)
elif max(a, b, c) == b:
if a > c:
print(2)
print(1)
print(3)
else:
print(3)
print(1)
print(2)
else:
if a > b:
print(2)
print(3)
else:
print(3)
print(2)
print(1) | 32.115385 | 113 | 0.562874 |
79442ee0211fe0d295cce806747e2317a14db0c6 | 15,161 | py | Python | SwiftManager.py | SongBS/SwiftManager | 2fdb7fe8987139b9b050c77a5d711bcdae2d1a62 | [
"Apache-2.0"
] | null | null | null | SwiftManager.py | SongBS/SwiftManager | 2fdb7fe8987139b9b050c77a5d711bcdae2d1a62 | [
"Apache-2.0"
] | null | null | null | SwiftManager.py | SongBS/SwiftManager | 2fdb7fe8987139b9b050c77a5d711bcdae2d1a62 | [
"Apache-2.0"
] | null | null | null | #-*-coding:cp949-*-
'''
Created on 2013. 6. 24.
@author: [email protected]
'''
import wx
import os
import SwiftFunction
import SwauthFunction
import DialogClass
import threading
#down side button
ID_BUTTON=100
#function
ID_EXIT=200
#windows
ID_SPLITTER=300
#menu
ID_SETTING=400
ID_ABOUT=401
ID_SWIFT_AUTH = 500
ID_SWIFT_CONTAINER_LIST = 501
ID_SWIFT_CONTAINER_SELECT = 502
ID_SWIFT_CREATE_CONTAINER = 503
ID_SWIFT_DELETE_CONTAINER = 504
ID_SWIFT_OBEJCT_LIST = 505
ID_SWIFT_OJBECT_METADATA = 506
ID_SWIFT_OBJECT_UPLOAD = 507
ID_SWIFT_OBJECT_DOWNLOAD = 508
ID_SWIFT_OBJECT_COPY = 509
ID_SWIFT_OBJECT_DELETE = 510
ID_SWAUTH_ACCOUNT_LIST = 600
ID_SWAUTH_ACCOUNT_DETAIL = 601
ID_SWAUTH_ADD_ACCOUNT = 602
ID_SWAUTH_DEL_ACCOUNT = 603
ID_SWAUTH_ADD_USER = 604
ID_SWAUTH_DEL_USER = 605
ID_SWAUTH_DEL_USER_ALL = 606
#Toolbar
ID_TOOLBAR_CONNECT = 700
ID_TOOLBAR_FOLDER = 701
ID_TOOLBAR_UPLOAD = 702
ID_TOOLBAR_DOWNLOAD = 703
ID_TOOLBAR_DELETE_FILE = 704
TD_TOOLBAR_DELETE_FOLDER = 705
ID_TOOLBAR_CONFIG = 706
ID_TOOLBAR_HELP = 707
ID_TOOLBAR_EXIT = 708
#side button
ID_SIDE_BUTTON = 800
class MainWindow(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title, size=(590,700), style=wx.DEFAULT_FRAME_STYLE|wx.NO_FULL_REPAINT_ON_RESIZE|wx.SYSTEM_MENU)
self.dirname = "."
#----------------------------------------------------------------
#config values
self.authToken = ""
self.storageUrl = ""
#swift config values
self.swift_url = ""
self.swift_account = ""
self.swift_user = ""
self.swift_passwd = ""
self.swift_tokenTTL = ""
self.swift_tokenNew = ""
self.selectedContainer = ""
#swauth config values
self.swauth_url = ""
self.swauth_admin = ""
self.swauth_adminpass = ""
#----------------------------------------------------------------
#set frame
self.panel=wx.Panel(self,-1)
self.textControl = wx.TextCtrl(self.panel, pos=(10, 35), size=(550, 500), style=wx.TE_MULTILINE|wx.TE_RICH)
wx.StaticText(self.panel, label="Operation Display:", pos=(10, 15), size=wx.DefaultSize, style=0)
# file
filemenu= wx.Menu()
filemenu.Append(ID_SETTING,"&Settings","")
filemenu.Append(ID_EXIT,"&Exit","")
swauthmenu= wx.Menu()
swauthmenu.Append(ID_SWAUTH_ACCOUNT_LIST,"&Show Account List","")
swauthmenu.Append(ID_SWAUTH_ACCOUNT_DETAIL,"&Show User List","")
swauthmenu.AppendSeparator()
swauthmenu.Append(ID_SWAUTH_ADD_ACCOUNT,"&Create Account","")
swauthmenu.Append(ID_SWAUTH_DEL_ACCOUNT,"&Delete Account","")
swauthmenu.AppendSeparator()
swauthmenu.Append(ID_SWAUTH_ADD_USER,"&Create User","")
swauthmenu.Append(ID_SWAUTH_DEL_USER,"&Delete User","")
swauthmenu.Append(ID_SWAUTH_DEL_USER_ALL,"&Delete User All","")
swiftmenu= wx.Menu()
swiftmenu.Append(ID_SWIFT_AUTH,"&Authentication","")
swiftmenu.AppendSeparator()
swiftmenu.Append(ID_SWIFT_CONTAINER_LIST,"&Show Container List","")
swiftmenu.Append(ID_SWIFT_CONTAINER_SELECT,"&Select Container","")
swiftmenu.Append(ID_SWIFT_CREATE_CONTAINER,"&Create Container","")
swiftmenu.Append(ID_SWIFT_DELETE_CONTAINER,"&Delete Container","")
swiftmenu.AppendSeparator()
swiftmenu.Append(ID_SWIFT_OBEJCT_LIST,"&Show Object List","")
swiftmenu.Append(ID_SWIFT_OJBECT_METADATA,"&Show Object Metadata","")
swiftmenu.Append(ID_SWIFT_OBJECT_UPLOAD,"&Upload Object ","")
swiftmenu.Append(ID_SWIFT_OBJECT_DOWNLOAD,"&Download Object","")
swiftmenu.Append(ID_SWIFT_OBJECT_COPY,"&Copy Object","")
swiftmenu.Append(ID_SWIFT_OBJECT_DELETE,"&Delete Object","")
# help
helpmenu = wx.Menu()
helpmenu.Append(ID_ABOUT,"&About Program","")
menuBar = wx.MenuBar()
menuBar.Append(filemenu,"&Program")
menuBar.Append(swauthmenu,"&Account")
menuBar.Append(swiftmenu,"&Storage")
menuBar.Append(helpmenu, "&Help")
self.SetMenuBar(menuBar)
self.Bind(wx.EVT_MENU, self.OnModifyConfig, id=ID_SETTING)
self.Bind(wx.EVT_MENU, self.OnExit, id=ID_EXIT)
self.Bind(wx.EVT_MENU, self.AccountList, id=ID_SWAUTH_ACCOUNT_LIST)
self.Bind(wx.EVT_MENU, self.AccountUserList, id=ID_SWAUTH_ACCOUNT_DETAIL)
self.Bind(wx.EVT_MENU, self.CreateAccount, id=ID_SWAUTH_ADD_ACCOUNT)
self.Bind(wx.EVT_MENU, self.DeleteAccount, id=ID_SWAUTH_DEL_ACCOUNT)
self.Bind(wx.EVT_MENU, self.CreateUser, id=ID_SWAUTH_ADD_USER)
self.Bind(wx.EVT_MENU, self.DeleteUser, id=ID_SWAUTH_DEL_USER)
self.Bind(wx.EVT_MENU, self.DeleteUserAll, id=ID_SWAUTH_DEL_USER_ALL)
self.Bind(wx.EVT_MENU, self.SwiftAuth, id=ID_SWIFT_AUTH)
self.Bind(wx.EVT_MENU, self.ListContainer, id=ID_SWIFT_CONTAINER_LIST)
self.Bind(wx.EVT_MENU, self.SelectContainer, id=ID_SWIFT_CONTAINER_SELECT)
self.Bind(wx.EVT_MENU, self.CreateContainer, id=ID_SWIFT_CREATE_CONTAINER)
self.Bind(wx.EVT_MENU, self.DeleteContainer, id=ID_SWIFT_DELETE_CONTAINER)
self.Bind(wx.EVT_MENU, self.ListObject, id=ID_SWIFT_OBEJCT_LIST)
self.Bind(wx.EVT_MENU, self.ObjectMeta, id=ID_SWIFT_OJBECT_METADATA)
self.Bind(wx.EVT_MENU, self.UploadObject, id=ID_SWIFT_OBJECT_UPLOAD)
self.Bind(wx.EVT_MENU, self.DownloadObject, id=ID_SWIFT_OBJECT_DOWNLOAD)
self.Bind(wx.EVT_MENU, self.CopyObject, id=ID_SWIFT_OBJECT_COPY)
self.Bind(wx.EVT_MENU, self.DeleteObject, id=ID_SWIFT_OBJECT_DELETE)
self.Bind(wx.EVT_MENU, self.OnAbout, id=ID_ABOUT)
tb = self.CreateToolBar( wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT | wx.TB_TEXT)
tb.SetBackgroundColour('white')
tb.AddSimpleTool(ID_TOOLBAR_CONNECT, wx.Bitmap('images/connect.png', wx.BITMAP_TYPE_PNG), 'Connect Container')
tb.AddSimpleTool(ID_TOOLBAR_FOLDER, wx.Bitmap('images/folder.png', wx.BITMAP_TYPE_PNG), 'Show Container List')
tb.AddSeparator()
tb.AddSimpleTool(ID_TOOLBAR_UPLOAD, wx.Bitmap('images/upload.png', wx.BITMAP_TYPE_PNG), 'Upload Object')
tb.AddSimpleTool(ID_TOOLBAR_DOWNLOAD, wx.Bitmap('images/download.png', wx.BITMAP_TYPE_PNG), 'Download Object')
tb.AddSimpleTool(ID_TOOLBAR_DELETE_FILE, wx.Bitmap('images/delete.png', wx.BITMAP_TYPE_PNG), 'Delete Object')
tb.AddSimpleTool(TD_TOOLBAR_DELETE_FOLDER, wx.Bitmap('images/deletefolder.png', wx.BITMAP_TYPE_PNG), 'Delete Container')
tb.AddSeparator()
tb.AddSimpleTool(ID_TOOLBAR_CONFIG, wx.Bitmap('images/setting.png', wx.BITMAP_TYPE_PNG), 'Settings')
tb.AddSimpleTool(ID_TOOLBAR_HELP, wx.Bitmap('images/help.png', wx.BITMAP_TYPE_PNG), 'Help')
tb.AddSimpleTool(ID_TOOLBAR_EXIT, wx.Bitmap('images/exit.png', wx.BITMAP_TYPE_PNG), 'Exit Prgram')
tb.Realize()
self.Bind(wx.EVT_TOOL, self.TBarConnect, id=ID_TOOLBAR_CONNECT)
self.Bind(wx.EVT_TOOL, self.ListContainer, id=ID_TOOLBAR_FOLDER)
self.Bind(wx.EVT_TOOL, self.UploadObject, id=ID_TOOLBAR_UPLOAD)
self.Bind(wx.EVT_TOOL, self.DownloadObject, id=ID_TOOLBAR_DOWNLOAD)
self.Bind(wx.EVT_TOOL, self.DeleteObject, id=ID_TOOLBAR_DELETE_FILE)
self.Bind(wx.EVT_TOOL, self.DeleteContainerAll, id=TD_TOOLBAR_DELETE_FOLDER)
self.Bind(wx.EVT_TOOL, self.OnModifyConfig, id=ID_TOOLBAR_CONFIG)
self.Bind(wx.EVT_TOOL, self.OnAbout, id=ID_TOOLBAR_HELP)
self.Bind(wx.EVT_TOOL, self.OnExit, id=ID_TOOLBAR_EXIT)
self.sb = self.CreateStatusBar()
self.sb.SetStatusText(os.getcwd())
self.Center()
self.Show(True)
try:
configFile = open("./config.ini", "r")
except:
wx.MessageBox(' Load Configuration Failure' ,'Notice' ,wx.OK|wx.ICON_INFORMATION)
configdig = DialogClass.dialogNewConfigure(self, -1, 'Settings')
configdig.ShowModal()
configdig.Destroy()
return
else:
config = configFile.read()
config = config.encode('utf-8')
for x in config.splitlines():
sLine = x.split("=", 1)
if "swift_url" in sLine :
self.swift_url = sLine[1].strip()
if "swift_account" in sLine :
self.swift_account = sLine[1].strip()
if "swift_user" in sLine :
self.swift_user = sLine[1].strip()
if "swift_passwd" in sLine :
self.swift_passwd = sLine[1].strip()
if "selectedContainer" in sLine :
self.selectedContainer = sLine[1].strip()
if "swift_tokenTTL" in sLine :
self.swift_tokenTTL = sLine[1].strip()
if "swift_tokenNew" in sLine :
self.swift_tokenNew = sLine[1].strip()
if "swauth_url" in sLine :
self.swauth_url = sLine[1].strip()
if "swauth_admin" in sLine :
self.swauth_admin = sLine[1].strip()
if "swauth_adminpass" in sLine :
self.swauth_adminpass = sLine[1].strip()
self.textControl.SetValue("> Load Configuration.\r\n")
self.textControl.AppendText("------------------------------------------------------------------------------------------\r\n")
self.textControl.AppendText("swift_url : %s\r\n" %self.swift_url)
self.textControl.AppendText("swift_account : %s\r\n" %self.swift_account)
self.textControl.AppendText("swift_user : %s\r\n" %self.swift_user)
self.textControl.AppendText("swift_passwd : %s\r\n" %self.swift_passwd)
self.textControl.AppendText("selectedContainer : %s\r\n" %self.selectedContainer)
self.textControl.AppendText("swift_tokenTTL : %s\r\n" %self.swift_tokenTTL)
self.textControl.AppendText("swift_tokenNew : %s\r\n" %self.swift_tokenNew)
self.textControl.AppendText("swauth_url : %s\r\n" %self.swauth_url)
self.textControl.AppendText("swauth_admin : %s\r\n" %self.swauth_admin)
self.textControl.AppendText("swauth_adminpass : %s\r\n" %self.swauth_adminpass)
self.textControl.AppendText("-------------------------------------------------------------------------------------------\r\n\r\n")
#event function
def OnAbout(self, event):
description = " This program is..... \r\n\r\n"
licence = """ Do not allow .... """
info = wx.AboutDialogInfo()
info.SetName('Swift Manager')
info.SetVersion('1.0')
info.SetDescription(description)
info.SetCopyright('(C) Gabia inc.')
info.SetWebSite('http://www.gabia.com')
info.SetLicence(licence)
info.AddDeveloper(' Gabia inc. ([email protected])')
wx.AboutBox(info)
def TBarConnect(self, event):
SwiftFunction.SwiftAuth(self)
SwiftFunction.ListObject(self)
def OnExit(self, event):
self.Close(True)
def OnModifyConfig(self, event):
dialog = DialogClass.dialogModifyConfigure(self, -1, 'Settings')
dialog.ShowModal()
dialog.Destroy()
self.swift_url = dialog.swift_url
self.swift_account = dialog.swift_account
self.swift_user = dialog.swift_user
self.swift_passwd = dialog.swift_passwd
self.swift_tokenTTL = dialog.swift_tokenTTL
self.swift_tokenNew = dialog.swift_tokenNew
self.selectedContainer = dialog.selectedContainer
self.swauth_url = dialog.swauth_url
self.swauth_admin = dialog.swauth_admin
self.swauth_adminpass = dialog.swauth_adminpass
def OnSize(self, event):
size = self.GetSize()
self.splitter.SetSashPosition(size.x / 2)
self.sb.SetStatusText(os.getcwd())
event.Skip()
def OnDoubleClick(self, event):
size = self.GetSize()
self.splitter.SetSashPosition(size.x / 2)
#swauth functon
def AccountList(self, event):
SwauthFunction.AccountList(self)
def AccountUserList(self, event):
SwauthFunction.AccountUserList(self)
def CreateAccount(self, event):
SwauthFunction.CreateAccount(self)
def DeleteAccount(self, event):
SwauthFunction.DeleteAccount(self)
def CreateUser(self, event):
SwauthFunction.CreateUser(self)
def DeleteUser(self, event):
SwauthFunction.DeleteUser(self)
def DeleteUserAll(self, event):
SwauthFunction.DeleteUserAll(self)
# swift function
def SwiftAuth(self, event):
SwiftFunction.SwiftAuth(self)
def ListContainer(self, event):
SwiftFunction.ListContainer(self)
def SelectContainer(self, event):
SwiftFunction.SelectContainer(self)
def CreateContainer(self, event):
SwiftFunction.CreateContainer(self)
def DeleteContainer(self, event):
SwiftFunction.DeleteContainer(self)
def ListObject(self, event):
SwiftFunction.ListObject(self)
def ObjectMeta(self, event):
SwiftFunction.ObjectMeta(self)
def UploadObject(self, event):
th=threading.Thread(SwiftFunction.UploadObject(self))
th.start();
def DownloadObject(self, event):
th=threading.Thread(SwiftFunction.DownloadObject(self))
th.start();
def CopyObject(self, event):
th=threading.Thread(SwiftFunction.CopyObject(self))
th.start();
def DeleteObject(self, event):
th=threading.Thread(SwiftFunction.DeleteObject(self))
th.start();
def DeleteContainerAll(self, event):
th=threading.Thread(SwiftFunction.DeleteContainerAll(self))
th.start();
def EmptyInput(self):
        wx.MessageBox('Required fields were not entered.' ,'Notices' ,wx.OK|wx.ICON_INFORMATION)
#main
app = wx.App(0)
MainWindow(None, -1, 'Swift Manager v1.0 beta')
app.MainLoop()
| 39.792651 | 146 | 0.607348 |
79442f6d1f79e6cd2fdc45ae7573948adab2a08d | 671 | py | Python | LeetCode/0508. Most Frequent Subtree Sum/solution.py | InnoFang/algorithms | 01847903f757722b6c877e1631e5413b9376c82e | [
"Apache-2.0"
] | null | null | null | LeetCode/0508. Most Frequent Subtree Sum/solution.py | InnoFang/algorithms | 01847903f757722b6c877e1631e5413b9376c82e | [
"Apache-2.0"
] | null | null | null | LeetCode/0508. Most Frequent Subtree Sum/solution.py | InnoFang/algorithms | 01847903f757722b6c877e1631e5413b9376c82e | [
"Apache-2.0"
] | null | null | null | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
"""
58 / 58 test cases passed.
Runtime: 56 ms
Memory Usage: 18.5 MB
"""
from collections import Counter
from typing import List
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def findFrequentTreeSum(self, root: TreeNode) -> List[int]:
cnt = Counter()
def dfs(node):
if node:
total = node.val + dfs(node.left) + dfs(node.right)
cnt[total] += 1
return total
return 0
dfs(root)
_max = max(cnt.values())
return [s for s, c in cnt.items() if c == _max]
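if __name__ == "__main__":
    # Illustrative check added for this write-up; it is not part of the
    # original LeetCode submission. The tree [5, 2, -3] has subtree sums
    # 2, -3 and 4, each occurring once, so all three are returned.
    root = TreeNode(5, TreeNode(2), TreeNode(-3))
    print(Solution().findFrequentTreeSum(root))  # e.g. [2, -3, 4] in any order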
| 26.84 | 67 | 0.532042 |
79442fbd6785a143235813c2a2cc6bafb176b358 | 785 | py | Python | source/pkgsrc/mail/feed2exec/patches/patch-setup.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | [
"Unlicense"
] | 1 | 2021-11-20T22:46:39.000Z | 2021-11-20T22:46:39.000Z | source/pkgsrc/mail/feed2exec/patches/patch-setup.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | [
"Unlicense"
] | null | null | null | source/pkgsrc/mail/feed2exec/patches/patch-setup.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | [
"Unlicense"
] | null | null | null | $NetBSD: patch-setup.py,v 1.2 2020/10/07 19:46:53 schmonz Exp $
Avoid setuptools_scm since tarball has no .git.
--- setup.py.orig 2020-10-06 18:35:43.000000000 +0000
+++ setup.py
@@ -126,10 +126,6 @@ if __name__ == '__main__':
long_description=sphinx2rst('README.rst'),
license=mod.__license_short__,
url=mod.__website__,
- use_scm_version={
- 'write_to': '%s/_version.py'
- % mod.__prog__,
- },
packages=packages,
package_data=package_data,
entry_points={
@@ -140,7 +136,6 @@ if __name__ == '__main__':
]
},
setup_requires=[
- 'setuptools_scm',
'sphinx',
],
install_requires=requires,
| 30.192308 | 63 | 0.529936 |
79443054d1cb5388a3895f8a85101616bc4e9e7f | 1,543 | py | Python | src/scripts/cobertura.py | hugots363/Multi2Sim | 37f146ffa18427885aa10260f74719b00c127692 | [
"Unlicense"
] | null | null | null | src/scripts/cobertura.py | hugots363/Multi2Sim | 37f146ffa18427885aa10260f74719b00c127692 | [
"Unlicense"
] | null | null | null | src/scripts/cobertura.py | hugots363/Multi2Sim | 37f146ffa18427885aa10260f74719b00c127692 | [
"Unlicense"
] | null | null | null | import re
import sys
def get_useful_prefetches(memreport):
result = {}
name = ""
for line in memreport:
m = re.match("\[ ([id]?l[0-9])-0 \]",line)
if m:
name = m.group(1)
m = re.match("Useful Prefetches = ([0-9]+)",line)
if m:
useful_pref = m.group(1)
result[name] = result.get(name,0) + float(useful_pref)
return result
def get_misses_without_prefetch(memreport):
result = {}
for line in memreport:
m = re.match("\[ ([id]?l[0-9])-0 \]",line)
if m:
name = m.group(1)
m = re.match("Misses = ([0-9]+)",line)
if m:
misses = m.group(1)
result[name] = result.get(name,0) + float(misses)
return result
def calc_coverage(useful_pref, misses):
result = []
for name1,name2 in zip(useful_pref,misses):
if name1 != name2:
print('ERROR: Cache level mismatch')
exit(-1)
if misses[name2] == 0:
misses[name2] = useful_pref[name1]
result.append((name1, useful_pref[name1] / misses[name2]))
return result
def main():
if len(sys.argv) != 3:
print("USAGE: program memreport_with_prefetch memreport_without_prefetch")
exit(1)
memreport_pref = open(sys.argv[1])
memreport_nopref = open(sys.argv[2])
useful_pref = get_useful_prefetches(memreport_pref)
misses = get_misses_without_prefetch(memreport_nopref)
print(useful_pref)
print(misses)
coverage = calc_coverage(useful_pref, misses)
print(coverage)
if __name__ == "__main__":
main()
| 24.109375 | 76 | 0.613739 |
79443260a7e636b74b8627e0061a50efda778a90 | 672 | py | Python | vending_machine/vending_machine.py | chapa369/tdd-vending_machine | e5c964a3cf083dbaa5fb95e105003b1c63292ed0 | [
"MIT"
] | null | null | null | vending_machine/vending_machine.py | chapa369/tdd-vending_machine | e5c964a3cf083dbaa5fb95e105003b1c63292ed0 | [
"MIT"
] | null | null | null | vending_machine/vending_machine.py | chapa369/tdd-vending_machine | e5c964a3cf083dbaa5fb95e105003b1c63292ed0 | [
"MIT"
] | 1 | 2020-08-19T06:57:59.000Z | 2020-08-19T06:57:59.000Z | class vending_machine:
    # There is room for refactoring here
    # Create a Coin class so that invalid coins such as a 40-yen coin cannot exist
price={'コーラ': 120}
def __init__(self):
self.money=0
self.storage={'コーラ': 5}
def insert(self,yen):
available_yen = [10,50,100,500,1000]
if yen in available_yen:
self.money += yen
return True
else:
return False
def get_total(self):
return self.money
def reject_money(self):
tmp=self.money
self.money=0
return tmp
def buy(self, drink_name):
for item in self.storage.keys():
            if item == drink_name and self.storage[item] >= 1:
self.storage[item] -= 1
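if __name__ == '__main__':
    # Usage sketch added for illustration only; it is not part of the original
    # TDD exercise. Amounts and the drink name follow the defaults above.
    vm = vending_machine()
    vm.insert(100)
    vm.insert(50)
    print(vm.get_total())     # 150
    vm.buy('コーラ')           # one コーラ is removed from storage
    print(vm.reject_money())  # returns the remaining credit (150 here)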
| 21 | 44 | 0.547619 |
7944328903abbe53589ff75d2a1704fac32d3aca | 1,793 | py | Python | protolpsmap/dense.py | vene/prototype-lp-sparsemap | 0a3320ebafa2269a400293f63892d18dd76a3801 | [
"MIT"
] | null | null | null | protolpsmap/dense.py | vene/prototype-lp-sparsemap | 0a3320ebafa2269a400293f63892d18dd76a3801 | [
"MIT"
] | null | null | null | protolpsmap/dense.py | vene/prototype-lp-sparsemap | 0a3320ebafa2269a400293f63892d18dd76a3801 | [
"MIT"
] | null | null | null | # dense factor
import numpy as np
class DenseFactor(object):
def __init__(self, m, n):
self.m = m
self.n = n
def vertex(self, y):
i, j = y
um = np.zeros(self.m)
un = np.zeros(self.n)
um[i] = 1
un[j] = 1
U = np.concatenate([um, un])
V = np.outer(um, un)
# if self.scale_u is not None:
# U *= self.scale_u
return U, V
def map_oracle(self, eta_u, eta_v):
S = eta_v.copy()
eta_um, eta_un = eta_u[:self.m], eta_u[self.m:]
S += eta_um[:, np.newaxis]
S += eta_un
i, j = np.unravel_index(S.argmax(), S.shape)
return i, j
def jacobian(self, active_set):
M = []
N = []
for y in active_set:
m, n = self.vertex(y)
M.append(m)
N.append(n.ravel())
M = np.column_stack(M)
N = np.column_stack(N)
Z = np.linalg.pinv(np.dot(M.T, M))
MZM = M @ Z @ M.T
d = len(Z)
one = np.ones((d, d))
eye = np.eye(d)
Zsum = Z.sum()
Zrow = Z.sum(axis=0)
J = (eye - (Z @ one) / Zsum) @ Z
JM = M @ J @ M.T
JN = M @ J @ N.T
return J, JM, JN, M, Z
if __name__ == '__main__':
from .sparsemap_fw import SparseMAPFW
m = 3
n = 4
eta_u = np.random.randn(m + n)
eta_v = np.random.randn(m, n)
df = DenseFactor(m, n)
y = df.map_oracle(eta_u, eta_v)
print(y)
u, v, active_set = SparseMAPFW(df).solve(eta_u, eta_v)
print(active_set)
    _, JM, JN, _, _ = df.jacobian(active_set)
from numdifftools import Jacobian
def f(eta_u_prime):
u, _, _ = SparseMAPFW(df).solve(eta_u_prime, eta_v)
return u
J = Jacobian(f)
print(J(eta_u) - JM)
| 20.848837 | 59 | 0.492471 |
7944328d4f4e0346705b44ad9ea9c3bc28ae1341 | 580 | py | Python | util/config/validators/validate_redis.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | util/config/validators/validate_redis.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | util/config/validators/validate_redis.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | import redis
from util.config.validators import BaseValidator, ConfigValidationException
class RedisValidator(BaseValidator):
name = "redis"
@classmethod
def validate(cls, validator_context):
"""
Validates connecting to redis.
"""
config = validator_context.config
redis_config = config.get("BUILDLOGS_REDIS", {})
if not "host" in redis_config:
raise ConfigValidationException("Missing redis hostname")
client = redis.StrictRedis(socket_connect_timeout=5, **redis_config)
client.ping()
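if __name__ == "__main__":
    # Illustrative sketch only -- in the real application the config framework
    # supplies a ValidatorContext; a SimpleNamespace with hypothetical
    # host/port values stands in for it here.
    from types import SimpleNamespace
    ctx = SimpleNamespace(config={"BUILDLOGS_REDIS": {"host": "localhost", "port": 6379}})
    RedisValidator.validate(ctx)  # raises if Redis cannot be reached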
| 26.363636 | 76 | 0.677586 |
794432c3b9e128b747081aa5b022cf4f3932256e | 7,400 | py | Python | wsgi.py | yees-cds/pricefinder | a263d484ba8b68df667899fe0f0c28ab61910d97 | [
"Apache-2.0"
] | null | null | null | wsgi.py | yees-cds/pricefinder | a263d484ba8b68df667899fe0f0c28ab61910d97 | [
"Apache-2.0"
] | null | null | null | wsgi.py | yees-cds/pricefinder | a263d484ba8b68df667899fe0f0c28ab61910d97 | [
"Apache-2.0"
] | null | null | null | """
/*-------------------------------------------------------------------*/
/* */
/* Copyright IBM Corp. 2013 All Rights Reserved */
/* */
/*-------------------------------------------------------------------*/
/* */
/* NOTICE TO USERS OF THE SOURCE CODE EXAMPLES */
/* */
/* The source code examples provided by IBM are only intended to */
/* assist in the development of a working software program. */
/* */
/* International Business Machines Corporation provides the source */
/* code examples, both individually and as one or more groups, */
/* "as is" without warranty of any kind, either expressed or */
/* implied, including, but not limited to the warranty of */
/* non-infringement and the implied warranties of merchantability */
/* and fitness for a particular purpose. The entire risk */
/* as to the quality and performance of the source code */
/* examples, both individually and as one or more groups, is with */
/* you. Should any part of the source code examples prove defective, */
/* you (and not IBM or an authorized dealer) assume the entire cost */
/* of all necessary servicing, repair or correction. */
/* */
/* IBM does not warrant that the contents of the source code */
/* examples, whether individually or as one or more groups, will */
/* meet your requirements or that the source code examples are */
/* error-free. */
/* */
/* IBM may make improvements and/or changes in the source code */
/* examples at any time. */
/* */
/* Changes may be made periodically to the information in the */
/* source code examples; these changes may be reported, for the */
/* sample code included herein, in new editions of the examples. */
/* */
/* References in the source code examples to IBM products, programs, */
/* or services do not imply that IBM intends to make these */
/* available in all countries in which IBM operates. Any reference */
/* to the IBM licensed program in the source code examples is not */
/* intended to state or imply that IBM's licensed program must be */
/* used. Any functionally equivalent program may be used. */
/*-------------------------------------------------------------------*/
"""
import bottle
from bottle import *
import os,sys,logging, traceback, json, string, urllib, urllib2
from BeautifulSoup import BeautifulSoup
import httplib2
import cloudant
import pprint
import urllib
from twilio.rest import TwilioRestClient
# Configs from BlueMix
vcap_config = os.environ.get('VCAP_SERVICES')
decoded_config = json.loads(vcap_config)
dbname = "fabulous-price-finder"
account = None
for key, value in decoded_config.iteritems():
if decoded_config[key][0]['name'].startswith('Twilio'):
twilio_creds = decoded_config[key][0]['credentials']
twilio_authToken = twilio_creds['authToken']
twilio_accountSID = twilio_creds['accountSID']
twilioClient = TwilioRestClient(twilio_accountSID, twilio_authToken)
if key.startswith('cloudant'):
cloudant_creds = decoded_config[key][0]['credentials']
cloudant_host = cloudant_creds['host']
cloudant_port = int(cloudant_creds['port'])
cloudant_username = cloudant_creds['username']
cloudant_password = cloudant_creds['password']
cloudant_url = str(cloudant_creds['url'])
account = cloudant.Account(cloudant_username)
login = account.login(cloudant_username, cloudant_password)
assert login.status_code == 200
db = account.database(dbname)
response = db.put()
print response.json
def sendTextWithMessage(message):
message = twilioClient.messages.create(to="+16172836931", from_="+1857399-2773", body=message)
#Provide all the static css and js files under the static dir to browser
@route('/static/:filename#.*#')
def server_static(filename):
""" This is for JS files """
return static_file(filename, root='static')
# Displays the home page
@bottle.get("/")
def testFunc():
return bottle.template('home')
# Get the prices for all of the items stored in the database
@bottle.get('/getCurrentPrices')
def getCurrentPrices():
z = []
view = db.all_docs()
for doc in view.iter(params={'include_docs': True}):
getCurrentPrice(doc['doc'])
pass
return bottle.template('currentPrice')
# Get the current price of a particular item
def getCurrentPrice(item):
try:
http = httplib2.Http()
status, page = http.request(urllib.unquote_plus(item["url"]))
soup = BeautifulSoup(page)
price = soup.find(id=item["idToCheck"]).string
if price is not None:
sendTextWithMessage("The current price of %s is %s" % (item["name"], price))
d = db.document(item["url"])
resp = d.merge({ 'url': item["url"], 'price': price})
return bottle.template('currentPrice', price=price)
else:
return bottle.template('currentPriceError')
except:
return bottle.template('currentPriceError')
# Saves the item info in the database
@bottle.post('/recordItemInfo')
def recordItemInfo():
name = str(request.forms.get('name'))
url = urllib.quote_plus(request.forms.get('url'))
idToCheck = str(request.forms.get('idToCheck'))
# get document
d = db.document(url)
# merge updated information
resp = d.merge({ 'url': url, 'name': name, 'idToCheck': idToCheck})
bottle.redirect('/displayall')
# Displays all the records in the database
@bottle.get('/displayall')
def displayData():
z = []
view = db.all_docs()
for doc in view.iter(params={'include_docs': True}):
z.append(doc['doc'])
pass
cursor = list(z)
totinf = int(len(cursor))
return bottle.template ('dbdump',totinf=totinf,cursor=cursor)
# Removes all the records from the database
@bottle.post('/clearall')
def clearAll():
# destroy DB
del account[dbname]
# recreate DB
# bug: the db is not getting recreated
db = account.database(dbname)
return bottle.template ('dbdump',totinf=0,cursor=[])
# Removes only the selected stuff from the database
@bottle.post('/delselected')
def removeSelected():
s = urllib.quote_plus(request.forms.get('url'))
# document we want to delete
del_doc = db.document(s)
# iterate over all documents to find revision # for one we want to delete
view = db.all_docs()
for doc in view.iter(params={'include_docs': True}):
if (doc['doc']['url'] == s):
rev = doc['doc']['_rev']
del_doc.delete(rev).raise_for_status()
bottle.redirect('/displayall')
debug(True)
# Error Methods
@bottle.error(404)
def error404(error):
return 'Nothing here--sorry!'
application = bottle.default_app()
if __name__ == '__main__':
port = int(os.getenv('PORT', '8000'))
bottle.run(host='0.0.0.0', port=port)
| 34.90566 | 95 | 0.607973 |
794433a2e08d5c500ef695ef1fa0161e46beff1a | 2,636 | py | Python | AUTOENCODERS/AE_LSTM__KIBANA.py | pawelptak/AI-Anomaly-Detection | 0d3e6072e273d6cc59ba79d5f8c73f393d1ec4e5 | [
"MIT"
] | 1 | 2022-03-23T10:18:17.000Z | 2022-03-23T10:18:17.000Z | AUTOENCODERS/AE_LSTM__KIBANA.py | pawelptak/AI-Anomaly-Detection | 0d3e6072e273d6cc59ba79d5f8c73f393d1ec4e5 | [
"MIT"
] | null | null | null | AUTOENCODERS/AE_LSTM__KIBANA.py | pawelptak/AI-Anomaly-Detection | 0d3e6072e273d6cc59ba79d5f8c73f393d1ec4e5 | [
"MIT"
] | null | null | null | from Classification.AutoencoderWindowsResultsClassificator import AutoencoderWindowsResultsClassificator
from DataPreparing.KIBANALoader import KIBANALoader
from DataPreparing.KIBANAPreprocessor import KIBANAPreprocessor
from Execution.AutoencoderModelExecutor import AutoencoderModelExecutor
from Model.AutoEncoderModelLSTM import AutoEncoderModelLSTM
import numpy as np
import matplotlib.pyplot as plt
from Analisis.AutoencoderResultsAnlyzer import AutoencoderResultsAnlyzer
UNITS = [80, 50, 20]
WINDOW_SIZE = 20
STRIDE = 5
TAKE_TOP_ERRORS = 20
loader = KIBANALoader()
preprocessor = KIBANAPreprocessor(
windows=True, windows_size=WINDOW_SIZE, windows_stride=STRIDE)
df = loader.load_train_data()
df_test = loader.load_test_data()
test_lines = loader.load_test_data_lines()
df = preprocessor.preprocess_train_data(df)
df_test = preprocessor.preprocess_test_data(df_test)
y_label = np.zeros(len(df_test))
model = AutoEncoderModelLSTM(df.shape[2], WINDOW_SIZE, UNITS)
model_executor = AutoencoderModelExecutor(model, epochs=5)
classificator = AutoencoderWindowsResultsClassificator()
analyzer = AutoencoderResultsAnlyzer()
model_executor.fit(df)
x_predicted = model_executor.predict(df_test)
classificator.feed(df_test, x_predicted, y_label)
error = classificator.calculate_reconstruction_error_windows()
def sortSecond(val):
return val[1]
ordered_errors = list(
map(lambda e: (e[0], e[1][0]), enumerate(error)))
ordered_errors.sort(key=sortSecond, reverse=True)
highest_errors_indexes = list(map(
lambda x: x[0], ordered_errors[:TAKE_TOP_ERRORS]))
errors_text = []
for index in highest_errors_indexes:
print(f'\n\n\nWINDOWS with index nr: {index}')
errors_text.append(
f'\n\n\nWINDOWS with index nr: {index}, ERROR SCORE: {error[index]}')
for i in range(index * STRIDE, index * STRIDE + WINDOW_SIZE):
print(test_lines[i])
errors_text.append(test_lines[i])
with open('errors_results.txt', 'a') as the_file:
for line in errors_text:
the_file.write(f"{line}")
# error_lines =
# errors = np.concatenate([error_anomaly, error_normal])
# y_label = np.concatenate([[1 for _ in range(len(error_anomaly))],
# [0 for _ in range(len(error_normal))]])
# (max_f1_score, best_threshold) = classificator.calculate_best_threshold()
# analyzer.feed(errors, y_label, best_threshold,
# max_f1_score, f'AE_1/Dense', 'AE_1'.upper())
# analyzer.plot_results()
# analyzer.plot_confusion_matrix()
# print(error_normal)
# print(high_error_indexes)
# print(np.array(df_test.values)[list(map(lambda h: h[0], high_error_indexes))])
| 30.298851 | 104 | 0.764036 |
794433e9e34480c78f5ff95720cd50e4c0a4b9dd | 8,188 | py | Python | gdal/swig/python/scripts/epsg_tr.py | mloskot/gdal-old-mirror-with-jpegxr-pr | 88409871d445a1ed7c973533cf2075f904a39f90 | [
"MIT"
] | 2 | 2018-03-22T22:31:00.000Z | 2021-07-16T01:34:47.000Z | gdal/swig/python/scripts/epsg_tr.py | mloskot/gdal-old-mirror-with-jpegxr-pr | 88409871d445a1ed7c973533cf2075f904a39f90 | [
"MIT"
] | 7 | 2021-06-04T23:45:15.000Z | 2022-03-12T00:44:14.000Z | gdal/swig/python/scripts/epsg_tr.py | mloskot/gdal-old-mirror-with-jpegxr-pr | 88409871d445a1ed7c973533cf2075f904a39f90 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ******************************************************************************
# $Id$
#
# Project: CFS OGC MapServer
# Purpose: Script to create WKT and PROJ.4 dictionaries for EPSG GCS/PCS
# codes.
# Author: Frank Warmerdam, [email protected]
#
# ******************************************************************************
# Copyright (c) 2001, Frank Warmerdam
# Copyright (c) 2009-2010, 2019, Even Rouault <even dot rouault at spatialys.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# ******************************************************************************
import sys
from osgeo import osr
from osgeo import gdal
# =============================================================================
def Usage():
print('Usage: epsg_tr.py [-wkt] [-pretty_wkt] [-proj4] [-xml] [-postgis]')
print(' [-authority name]')
sys.exit(1)
# =============================================================================
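# Emit one record for a single CRS (srs) in the selected output format.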
def trHandleCode(set_srid, srs, auth_name, code, deprecated, output_format):
if output_format == '-pretty_wkt':
print('%s:%s' % (auth_name, str(code)))
print(srs.ExportToPrettyWkt())
if output_format == '-xml':
print(srs.ExportToXML())
if output_format == '-wkt':
print('EPSG:%d' % code)
print(srs.ExportToWkt())
if output_format == '-proj4':
out_string = srs.ExportToProj4()
name = srs.GetName()
print('# %s' % name)
if out_string.find('+proj=') > -1:
print('<%s> %s <>' % (str(code), out_string))
else:
print('# Unable to translate coordinate system '
'%s:%s into PROJ.4 format.' % (auth_name, str(code)))
print('#')
if output_format == '-postgis':
if code in set_srid:
if auth_name == 'ESRI':
if int(code) < 32767:
return
assert code not in set_srid, (auth_name, code)
set_srid.add(code)
name = srs.GetName()
if deprecated and 'deprecated' not in name:
name += " (deprecated)"
wkt = srs.ExportToWkt()
proj4text = srs.ExportToProj4()
print('---')
print('--- %s %s : %s' % (auth_name, str(code), name))
print('---')
if proj4text is None or len(proj4text) == 0:
print('-- (unable to translate to PROJ.4)')
else:
wkt = gdal.EscapeString(wkt, scheme=gdal.CPLES_SQL)
proj4text = gdal.EscapeString(proj4text, scheme=gdal.CPLES_SQL)
print('INSERT INTO "spatial_ref_sys" ("srid","auth_name","auth_srid","srtext","proj4text") VALUES (%d,\'%s\',%d,\'%s\',\'%s\');' %
(int(code), auth_name, int(code), wkt, proj4text))
# INGRES COPY command input.
if output_format == '-copy':
try:
wkt = srs.ExportToWkt()
proj4text = srs.ExportToProj4()
print('%s\t%d%s\t%s\t%d%s\t%d%s\n'
% (str(code), 4, auth_name, str(code), len(wkt), wkt,
len(proj4text), proj4text))
except:
pass
# =============================================================================
if __name__ == '__main__':
output_format = '-pretty_wkt'
authority = None
argv = gdal.GeneralCmdLineProcessor(sys.argv)
if argv is None:
sys.exit(0)
# Parse command line arguments.
i = 1
while i < len(argv):
arg = argv[i]
if arg == '-wkt' or arg == '-pretty_wkt' or arg == '-proj4' \
or arg == '-postgis' or arg == '-xml' or arg == '-copy':
output_format = arg
elif arg == '-authority':
i = i + 1
authority = argv[i]
elif arg[0] == '-':
Usage()
else:
Usage()
i = i + 1
# Output BEGIN transaction for PostGIS
if output_format == '-postgis':
print('BEGIN;')
# loop over all codes to generate output
if authority:
authorities = [ authority ]
elif output_format == '-postgis' :
authorities = [ 'EPSG', 'ESRI' ]
else:
authorities = [ 'EPSG', 'ESRI', 'IGNF' ]
set_srid = set()
for authority in authorities:
if authority in ('EPSG', 'ESRI'):
set_codes_geographic = set()
set_codes_geographic_3d = set()
set_codes_projected = set()
set_codes_geocentric = set()
set_codes_compound = set()
set_deprecated = set()
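            # Bucket all CRS codes of this authority by type so the output
            # can be grouped (geographic 2D/3D, projected, geocentric, compound).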
for crs_info in osr.GetCRSInfoListFromDatabase(authority):
code = int(crs_info.code)
if crs_info.type == osr.OSR_CRS_TYPE_COMPOUND:
set_codes_compound.add(code)
elif crs_info.type == osr.OSR_CRS_TYPE_GEOGRAPHIC_3D:
set_codes_geographic_3d.add(code)
elif crs_info.type == osr.OSR_CRS_TYPE_GEOGRAPHIC_2D:
set_codes_geographic.add(code)
elif crs_info.type == osr.OSR_CRS_TYPE_PROJECTED:
set_codes_projected.add(code)
elif crs_info.type == osr.OSR_CRS_TYPE_GEOCENTRIC:
set_codes_geocentric.add(code)
if crs_info.deprecated:
set_deprecated.add(code)
set_codes_geographic = sorted(set_codes_geographic)
set_codes_geographic_3d = sorted(set_codes_geographic_3d)
set_codes_projected = sorted(set_codes_projected)
set_codes_geocentric = sorted(set_codes_geocentric)
set_codes_compound = sorted(set_codes_compound)
for typestr, set_codes in (('Geographic 2D CRS', set_codes_geographic),
('Projected CRS', set_codes_projected),
('Geocentric CRS', set_codes_geocentric),
('Compound CRS', set_codes_compound),
('Geographic 3D CRS', set_codes_geographic_3d)):
if set_codes and output_format == '-postgis':
print('-' * 80)
print('--- ' + authority + ' ' + typestr)
print('-' * 80)
for code in set_codes:
srs = osr.SpatialReference()
srs.SetFromUserInput(authority + ':' + str(code))
deprecated = False
if code in set_deprecated:
deprecated = True
trHandleCode(set_srid, srs, authority, str(code), deprecated, output_format)
else:
for crs_info in osr.GetCRSInfoListFromDatabase(authority):
srs = osr.SpatialReference()
srs.SetFromUserInput(authority + ':' + crs_info.code)
trHandleCode(set_srid, srs, authority, crs_info.code, crs_info.deprecated, output_format)
# Output COMMIT transaction for PostGIS
if output_format == '-postgis':
print('COMMIT;')
print('VACUUM ANALYZE spatial_ref_sys;')
| 36.717489 | 142 | 0.539692 |
7944348585a4720b95c902377562f60ea5edcb6f | 7,838 | py | Python | magenta/models/arbitrary_image_stylization/arbitrary_image_stylization_distill_mobilenet.py | Eshan-Agarwal/magenta | 21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057 | [
"Apache-2.0"
] | 2 | 2020-04-30T13:46:55.000Z | 2021-07-02T20:07:58.000Z | magenta/models/arbitrary_image_stylization/arbitrary_image_stylization_distill_mobilenet.py | Eshan-Agarwal/magenta | 21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057 | [
"Apache-2.0"
] | 1 | 2020-03-01T16:02:10.000Z | 2020-03-01T16:02:10.000Z | magenta/models/arbitrary_image_stylization/arbitrary_image_stylization_distill_mobilenet.py | Eshan-Agarwal/magenta | 21f4cbf8ac2717df6a6fbff8cc6a027fbf3e4057 | [
"Apache-2.0"
] | 1 | 2020-09-21T01:23:05.000Z | 2020-09-21T01:23:05.000Z | # Copyright 2020 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Distills a trained style prediction network using a MobileNetV2.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_mobilenet_model as build_mobilenet_model
from magenta.models.arbitrary_image_stylization import arbitrary_image_stylization_build_model as build_model
from magenta.models.image_stylization import image_utils
import tensorflow.compat.v1 as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 0.5e-3, "vgg_16/conv2": 0.5e-3,'
' "vgg_16/conv3": 0.5e-3, "vgg_16/conv4": 0.5e-3}')
flags = tf.app.flags
flags.DEFINE_float('clip_gradient_norm', 0, 'Clip gradients to this norm')
flags.DEFINE_float('learning_rate', 1e-5, 'Learning rate')
flags.DEFINE_float('total_variation_weight', 1e4, 'Total variation weight')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
'Content weights')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS, 'Style weights')
flags.DEFINE_integer('batch_size', 8, 'Batch size.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_boolean('random_style_image_size', True,
'Whether to resize the style images '
'to a random size or not.')
flags.DEFINE_boolean(
'augment_style_images', True,
'Whether to augment style images or not.')
flags.DEFINE_boolean('center_crop', False,
'Whether to center crop the style images.')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter servers. If 0, parameters '
'are handled locally by the worker.')
flags.DEFINE_integer('save_summaries_secs', 15,
'Frequency at which summaries are saved, in seconds.')
flags.DEFINE_integer('save_interval_secs', 15,
'Frequency at which the model is saved, in seconds.')
flags.DEFINE_integer('task', 0, 'Task ID. Used when training with multiple '
'workers to identify each worker.')
flags.DEFINE_integer('train_steps', 8000000, 'Number of training steps.')
flags.DEFINE_string('master', '', 'BNS name of the TensorFlow master to use.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('train_dir', None,
'Directory for checkpoints and summaries.')
flags.DEFINE_string('initial_checkpoint', None,
'Path to the pre-trained arbitrary_image_stylization '
'checkpoint')
flags.DEFINE_string('mobilenet_checkpoint', 'mobilenet_v2_1.0_224.ckpt',
'Path to the pre-trained mobilenet checkpoint')
flags.DEFINE_boolean('use_true_loss', False,
'Add true style loss term based on VGG.')
flags.DEFINE_float('true_loss_weight', 1e-9,
'Scale factor for real loss')
FLAGS = flags.FLAGS
def main(unused_argv=None):
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
# Forces all input processing onto CPU in order to reserve the GPU for the
# forward inference and back-propagation.
device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
with tf.device(
tf.train.replica_device_setter(FLAGS.ps_tasks, worker_device=device)):
# Load content images
content_inputs_, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
FLAGS.image_size)
# Loads style images.
[style_inputs_, _,
style_inputs_orig_] = image_utils.arbitrary_style_image_inputs(
FLAGS.style_dataset_file,
batch_size=FLAGS.batch_size,
image_size=FLAGS.image_size,
shuffle=True,
center_crop=FLAGS.center_crop,
augment_style_images=FLAGS.augment_style_images,
random_style_image_size=FLAGS.random_style_image_size)
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
# Process style and content weight flags.
content_weights = ast.literal_eval(FLAGS.content_weights)
style_weights = ast.literal_eval(FLAGS.style_weights)
# Define the model
stylized_images, \
true_loss, \
_, \
bottleneck_feat = build_mobilenet_model.build_mobilenet_model(
content_inputs_,
style_inputs_,
mobilenet_trainable=True,
style_params_trainable=False,
style_prediction_bottleneck=100,
adds_losses=True,
content_weights=content_weights,
style_weights=style_weights,
total_variation_weight=FLAGS.total_variation_weight,
)
_, inception_bottleneck_feat = build_model.style_prediction(
style_inputs_,
[],
[],
is_training=False,
trainable=False,
inception_end_point='Mixed_6e',
style_prediction_bottleneck=100,
reuse=None,
)
print('PRINTING TRAINABLE VARIABLES')
for x in tf.trainable_variables():
print(x)
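      # Distillation objective: make the MobileNetV2-based style bottleneck
      # match the bottleneck produced by the frozen Inception-based style
      # prediction network.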
mse_loss = tf.losses.mean_squared_error(
inception_bottleneck_feat, bottleneck_feat)
total_loss = mse_loss
if FLAGS.use_true_loss:
true_loss = FLAGS.true_loss_weight*true_loss
total_loss += true_loss
if FLAGS.use_true_loss:
tf.summary.scalar('mse', mse_loss)
tf.summary.scalar('true_loss', true_loss)
tf.summary.scalar('total_loss', total_loss)
tf.summary.image('image/0_content_inputs', content_inputs_, 3)
tf.summary.image('image/1_style_inputs_orig', style_inputs_orig_, 3)
tf.summary.image('image/2_style_inputs_aug', style_inputs_, 3)
tf.summary.image('image/3_stylized_images', stylized_images, 3)
mobilenet_variables_to_restore = slim.get_variables_to_restore(
include=['MobilenetV2'],
exclude=['global_step'])
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = slim.learning.create_train_op(
total_loss,
optimizer,
clip_gradient_norm=FLAGS.clip_gradient_norm,
summarize_gradients=False
)
init_fn = slim.assign_from_checkpoint_fn(
FLAGS.initial_checkpoint,
slim.get_variables_to_restore(
exclude=['MobilenetV2', 'mobilenet_conv', 'global_step']))
init_pretrained_mobilenet = slim.assign_from_checkpoint_fn(
FLAGS.mobilenet_checkpoint, mobilenet_variables_to_restore)
def init_sub_networks(session):
init_fn(session)
init_pretrained_mobilenet(session)
slim.learning.train(
train_op=train_op,
logdir=os.path.expanduser(FLAGS.train_dir),
master=FLAGS.master,
is_chief=FLAGS.task == 0,
number_of_steps=FLAGS.train_steps,
init_fn=init_sub_networks,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| 40.194872 | 129 | 0.691631 |
794434db109bea45a8765a6f4f0175ce4fade3af | 877 | py | Python | sdks/python/test/test_ErrorCounts.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/test/test_ErrorCounts.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/test/test_ErrorCounts.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from ErrorCounts.clsErrorCounts import ErrorCounts # noqa: E501
from appcenter_sdk.rest import ApiException
class TestErrorCounts(unittest.TestCase):
"""ErrorCounts unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testErrorCounts(self):
"""Test ErrorCounts"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsErrorCounts.ErrorCounts() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 21.925 | 81 | 0.703535 |
79443554efc9f5551f33556b1d85d9668e29ceeb | 14,447 | py | Python | bpython/test/test_autocomplete.py | supremestdoggo/bpython | 06d255ada61105684f4e2b92f09219f9619506e6 | [
"PSF-2.0"
] | null | null | null | bpython/test/test_autocomplete.py | supremestdoggo/bpython | 06d255ada61105684f4e2b92f09219f9619506e6 | [
"PSF-2.0"
] | null | null | null | bpython/test/test_autocomplete.py | supremestdoggo/bpython | 06d255ada61105684f4e2b92f09219f9619506e6 | [
"PSF-2.0"
] | null | null | null | import inspect
import keyword
import unittest
from collections import namedtuple
from unittest import mock
try:
import jedi
has_jedi = True
except ImportError:
has_jedi = False
from bpython import autocomplete
glob_function = "glob.iglob"
class TestSafeEval(unittest.TestCase):
def test_catches_syntax_error(self):
with self.assertRaises(autocomplete.EvaluationError):
autocomplete.safe_eval("1re", {})
class TestFormatters(unittest.TestCase):
def test_filename(self):
completer = autocomplete.FilenameCompletion()
last_part_of_filename = completer.format
self.assertEqual(last_part_of_filename("abc"), "abc")
self.assertEqual(last_part_of_filename("abc/"), "abc/")
self.assertEqual(last_part_of_filename("abc/efg"), "efg")
self.assertEqual(last_part_of_filename("abc/efg/"), "efg/")
self.assertEqual(last_part_of_filename("/abc"), "abc")
self.assertEqual(last_part_of_filename("ab.c/e.f.g/"), "e.f.g/")
def test_attribute(self):
self.assertEqual(autocomplete.after_last_dot("abc.edf"), "edf")
def completer(matches):
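    # Stub completion type whose matches() always returns the given value.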
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
class TestGetCompleter(unittest.TestCase):
def test_no_completers(self):
self.assertTupleEqual(autocomplete.get_completer([], 0, ""), ([], None))
def test_one_completer_without_matches_returns_empty_list_and_none(self):
a = completer([])
self.assertTupleEqual(
autocomplete.get_completer([a], 0, ""), ([], None)
)
def test_one_completer_returns_matches_and_completer(self):
a = completer(["a"])
self.assertTupleEqual(
autocomplete.get_completer([a], 0, ""), (["a"], a)
)
def test_two_completers_with_matches_returns_first_matches(self):
a = completer(["a"])
b = completer(["b"])
self.assertEqual(autocomplete.get_completer([a, b], 0, ""), (["a"], a))
def test_first_non_none_completer_matches_are_returned(self):
a = completer([])
b = completer(["a"])
self.assertEqual(autocomplete.get_completer([a, b], 0, ""), ([], None))
def test_only_completer_returns_None(self):
a = completer(None)
self.assertEqual(autocomplete.get_completer([a], 0, ""), ([], None))
def test_first_completer_returns_None(self):
a = completer(None)
b = completer(["a"])
self.assertEqual(autocomplete.get_completer([a, b], 0, ""), (["a"], b))
class TestCumulativeCompleter(unittest.TestCase):
def completer(self, matches):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
def test_no_completers_fails(self):
with self.assertRaises(ValueError):
autocomplete.CumulativeCompleter([])
def test_one_empty_completer_returns_empty(self):
a = self.completer([])
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, "abc"), set())
def test_one_none_completer_returns_none(self):
a = self.completer(None)
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, "abc"), None)
def test_two_completers_get_both(self):
a = self.completer(["a"])
b = self.completer(["b"])
cumulative = autocomplete.CumulativeCompleter([a, b])
self.assertEqual(cumulative.matches(3, "abc"), {"a", "b"})
class TestFilenameCompletion(unittest.TestCase):
def setUp(self):
self.completer = autocomplete.FilenameCompletion()
def test_locate_fails_when_not_in_string(self):
self.assertEqual(self.completer.locate(4, "abcd"), None)
def test_locate_succeeds_when_in_string(self):
self.assertEqual(self.completer.locate(4, "a'bc'd"), (2, 4, "bc"))
def test_issue_491(self):
self.assertNotEqual(self.completer.matches(9, '"a[a.l-1]'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_none_if_not_in_string(self):
self.assertEqual(self.completer.matches(2, "abcd"), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_empty_list_when_no_files(self):
self.assertEqual(self.completer.matches(2, '"a'), set())
@mock.patch(glob_function, new=lambda text: ["abcde", "aaaaa"])
@mock.patch("os.path.expanduser", new=lambda text: text)
@mock.patch("os.path.isdir", new=lambda text: False)
@mock.patch("os.path.sep", new="/")
def test_match_returns_files_when_files_exist(self):
self.assertEqual(
sorted(self.completer.matches(2, '"x')), ["aaaaa", "abcde"]
)
@mock.patch(glob_function, new=lambda text: ["abcde", "aaaaa"])
@mock.patch("os.path.expanduser", new=lambda text: text)
@mock.patch("os.path.isdir", new=lambda text: True)
@mock.patch("os.path.sep", new="/")
def test_match_returns_dirs_when_dirs_exist(self):
self.assertEqual(
sorted(self.completer.matches(2, '"x')), ["aaaaa/", "abcde/"]
)
@mock.patch(
glob_function, new=lambda text: ["/expand/ed/abcde", "/expand/ed/aaaaa"]
)
@mock.patch(
"os.path.expanduser", new=lambda text: text.replace("~", "/expand/ed")
)
@mock.patch("os.path.isdir", new=lambda text: False)
@mock.patch("os.path.sep", new="/")
def test_tilde_stays_pretty(self):
self.assertEqual(
sorted(self.completer.matches(4, '"~/a')), ["~/aaaaa", "~/abcde"]
)
@mock.patch("os.path.sep", new="/")
def test_formatting_takes_just_last_part(self):
self.assertEqual(self.completer.format("/hello/there/"), "there/")
self.assertEqual(self.completer.format("/hello/there"), "there")
class MockNumPy:
"""This is a mock numpy object that raises an error when there is an attempt
to convert it to a boolean."""
    def __bool__(self):  # Python 3 truth-value hook (was __nonzero__ in Python 2)
raise ValueError(
"The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all()"
)
class TestDictKeyCompletion(unittest.TestCase):
def test_set_of_keys_returned_when_matches_found(self):
com = autocomplete.DictKeyCompletion()
local = {"d": {"ab": 1, "cd": 2}}
self.assertSetEqual(
com.matches(2, "d[", locals_=local), {"'ab']", "'cd']"}
)
def test_none_returned_when_eval_error(self):
com = autocomplete.DictKeyCompletion()
local = {"e": {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(2, "d[", locals_=local), None)
def test_none_returned_when_not_dict_type(self):
com = autocomplete.DictKeyCompletion()
local = {"l": ["ab", "cd"]}
self.assertEqual(com.matches(2, "l[", locals_=local), None)
def test_none_returned_when_no_matches_left(self):
com = autocomplete.DictKeyCompletion()
local = {"d": {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(3, "d[r", locals_=local), None)
def test_obj_that_does_not_allow_conversion_to_bool(self):
com = autocomplete.DictKeyCompletion()
local = {"mNumPy": MockNumPy()}
self.assertEqual(com.matches(7, "mNumPy[", locals_=local), None)
class Foo:
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
class Properties(Foo):
@property
def asserts_when_called(self):
raise AssertionError("getter method called")
class Slots:
__slots__ = ["a", "b"]
class OverriddenGetattribute(Foo):
def __getattribute__(self, name):
raise AssertionError("custom get attribute invoked")
class TestAttrCompletion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.com = autocomplete.AttrCompletion()
def test_att_matches_found_on_instance(self):
self.assertSetEqual(
self.com.matches(2, "a.", locals_={"a": Foo()}),
{"a.method", "a.a", "a.b"},
)
def test_descriptor_attributes_not_run(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(
com.matches(2, "a.", locals_={"a": Properties()}),
{"a.b", "a.a", "a.method", "a.asserts_when_called"},
)
def test_custom_get_attribute_not_invoked(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(
com.matches(2, "a.", locals_={"a": OverriddenGetattribute()}),
{"a.b", "a.a", "a.method"},
)
def test_slots_not_crash(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(
com.matches(2, "A.", locals_={"A": Slots}), {"A.b", "A.a"},
)
class TestExpressionAttributeCompletion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.com = autocomplete.ExpressionAttributeCompletion()
def test_att_matches_found_on_instance(self):
self.assertSetEqual(
self.com.matches(5, "a[0].", locals_={"a": [Foo()]}),
{"method", "a", "b"},
)
def test_other_getitem_methods_not_called(self):
class FakeList:
def __getitem__(inner_self, i):
self.fail("possibly side-effecting __getitem_ method called")
self.com.matches(5, "a[0].", locals_={"a": FakeList()})
def test_tuples_complete(self):
self.assertSetEqual(
self.com.matches(5, "a[0].", locals_={"a": (Foo(),)}),
{"method", "a", "b"},
)
@unittest.skip("TODO, subclasses do not complete yet")
def test_list_subclasses_complete(self):
class ListSubclass(list):
pass
self.assertSetEqual(
self.com.matches(5, "a[0].", locals_={"a": ListSubclass([Foo()])}),
{"method", "a", "b"},
)
def test_getitem_not_called_in_list_subclasses_overriding_getitem(self):
class FakeList(list):
def __getitem__(inner_self, i):
self.fail("possibly side-effecting __getitem_ method called")
self.com.matches(5, "a[0].", locals_={"a": FakeList()})
def test_literals_complete(self):
self.assertSetEqual(
self.com.matches(10, "[a][0][0].", locals_={"a": (Foo(),)}),
{"method", "a", "b"},
)
def test_dictionaries_complete(self):
self.assertSetEqual(
self.com.matches(7, 'a["b"].', locals_={"a": {"b": Foo()}}),
{"method", "a", "b"},
)
class TestMagicMethodCompletion(unittest.TestCase):
def test_magic_methods_complete_after_double_underscores(self):
com = autocomplete.MagicMethodCompletion()
block = "class Something(object)\n def __"
self.assertSetEqual(
com.matches(10, " def __", current_block=block),
set(autocomplete.MAGIC_METHODS),
)
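# Minimal stand-in for the completion objects returned by jedi's Script.complete().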
Completion = namedtuple("Completion", ["name", "complete"])
@unittest.skipUnless(has_jedi, "jedi required")
class TestMultilineJediCompletion(unittest.TestCase):
def test_returns_none_with_single_line(self):
com = autocomplete.MultilineJediCompletion()
self.assertEqual(
com.matches(2, "Va", current_block="Va", history=[]), None
)
    def test_returns_none_with_blank_second_line(self):
com = autocomplete.MultilineJediCompletion()
self.assertEqual(
com.matches(
0, "", current_block="class Foo():\n", history=["class Foo():"]
),
None,
)
def matches_from_completions(
self, cursor, line, block, history, completions
):
with mock.patch("bpython.autocomplete.jedi.Script") as Script:
script = Script.return_value
script.complete.return_value = completions
com = autocomplete.MultilineJediCompletion()
return com.matches(
cursor, line, current_block=block, history=history
)
def test_completions_starting_with_different_letters(self):
matches = self.matches_from_completions(
2,
" a",
"class Foo:\n a",
["adsf"],
[Completion("Abc", "bc"), Completion("Cbc", "bc")],
)
self.assertEqual(matches, None)
def test_completions_starting_with_different_cases(self):
matches = self.matches_from_completions(
2,
" a",
"class Foo:\n a",
["adsf"],
[Completion("Abc", "bc"), Completion("ade", "de")],
)
self.assertSetEqual(matches, {"ade"})
def test_issue_544(self):
com = autocomplete.MultilineJediCompletion()
code = "@asyncio.coroutine\ndef"
history = ("import asyncio", "@asyncio.coroutin")
com.matches(3, "def", current_block=code, history=history)
class TestGlobalCompletion(unittest.TestCase):
def setUp(self):
self.com = autocomplete.GlobalCompletion()
def test_function(self):
def function():
pass
self.assertEqual(
self.com.matches(8, "function", locals_={"function": function}),
{"function("},
)
def test_completions_are_unicode(self):
for m in self.com.matches(1, "a", locals_={"abc": 10}):
self.assertIsInstance(m, str)
def test_mock_kwlist(self):
with mock.patch.object(keyword, "kwlist", new=["abcd"]):
self.assertEqual(self.com.matches(3, "abc", locals_={}), None)
def test_mock_kwlist_non_ascii(self):
with mock.patch.object(keyword, "kwlist", new=["abcß"]):
self.assertEqual(self.com.matches(3, "abc", locals_={}), None)
class TestParameterNameCompletion(unittest.TestCase):
def test_set_of_params_returns_when_matches_found(self):
def func(apple, apricot, banana, carrot):
pass
argspec = list(inspect.getfullargspec(func))
argspec = ["func", argspec, False]
com = autocomplete.ParameterNameCompletion()
self.assertSetEqual(
com.matches(1, "a", argspec=argspec), {"apple=", "apricot="}
)
self.assertSetEqual(com.matches(2, "ba", argspec=argspec), {"banana="})
self.assertSetEqual(com.matches(3, "car", argspec=argspec), {"carrot="})
| 33.992941 | 80 | 0.626497 |
794435bfa093b84efd7f4cc8a5373e1418eb54bf | 23,500 | py | Python | windows_packages_gpu/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 353 | 2020-12-10T10:47:17.000Z | 2022-03-31T23:08:29.000Z | windows_packages_gpu/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 80 | 2020-12-10T09:54:22.000Z | 2022-03-30T22:08:45.000Z | windows_packages_gpu/torch/testing/_internal/distributed/ddp_under_dist_autograd_test.py | codeproject/DeepStack | d96368a3db1bc0266cb500ba3701d130834da0e6 | [
"Apache-2.0"
] | 63 | 2020-12-10T17:10:34.000Z | 2022-03-28T16:27:07.000Z | #!/usr/bin/env python3
from typing import NamedTuple
import enum
import logging
import os
import threading
from torch.distributed.nn import RemoteModule
import torch
from torch.distributed import rpc
from torch.nn.parallel import DistributedDataParallel
from torch.testing._internal.common_distributed import (
MultiProcessTestCase,
requires_gloo,
requires_nccl,
skip_if_lt_x_gpu,
)
from torch.testing._internal.common_utils import (
TEST_WITH_ASAN,
)
from torch.testing._internal.dist_utils import dist_init
import torch.distributed as dist
import torch.distributed.autograd as dist_autograd
import torch.nn as nn
import unittest
from torch.testing._internal.distributed.rpc.rpc_agent_test_fixture import (
RpcAgentTestFixture,
)
NUM_EM_ROW = 2
D_SPARSE = 3
D_DENSE = 2
D_HID = 3
D_OUT = 1
NUM_TRAINERS = 4
# Trainers + the master + the remote worker
WORLD_SIZE = NUM_TRAINERS + 2
TRAINER_RANKS = list(range(NUM_TRAINERS))
REMOTE_WORKER_RANK = TRAINER_RANKS[-1] + 1
MASTER_RANK = REMOTE_WORKER_RANK + 1
class DdpMode(enum.Enum):
# Don't apply DDP
NONE = enum.auto()
# Apply DDP to the top level nn.Module
OUTSIDE = enum.auto()
# Embed DDP inside the top level nn.Module
INSIDE = enum.auto()
def init_logger():
logger = logging.getLogger(__name__)
level = logging.DEBUG if "debug" in os.environ else logging.INFO
logger.setLevel(level)
console = logging.StreamHandler()
formatter = logging.Formatter(
"%(asctime)s %(filename)s:%(lineno)s %(levelname)s p:%(processName)s t:%(threadName)s: %(message)s"
)
console.setFormatter(formatter)
console.setLevel(level)
# add the handlers to the logger
logger.addHandler(console)
logger.propagate = False
return logger
gLogger = init_logger()
class FeatureSet(NamedTuple):
""" A feature set has 2 types of features"""
dense_features: torch.Tensor
sparse_features: torch.LongTensor
values: torch.Tensor
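# Helpers for invoking an instance method on the object behind an RRef,
# synchronously or asynchronously, on the worker that owns it.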
def _call_method(method, rref, *args, **kwargs):
return method(rref.local_value(), *args, **kwargs)
def _remote_method(method, rref, *args, **kwargs):
args_tup = tuple([method, rref] + list(args))
return rpc.rpc_sync(
rref.owner(), _call_method, args=args_tup, kwargs=kwargs
)
def _remote_method_async(method, rref, *args, **kwargs):
args_tup = tuple([method, rref] + list(args))
return rpc.rpc_async(
rref.owner(), _call_method, args=args_tup, kwargs=kwargs
)
class RemoteEM(nn.Module):
def __init__(self, num_embeddings: int, embedding_dim: int):
gLogger.info(f"Initing RemoteEM with {num_embeddings} {embedding_dim}")
super(RemoteEM, self).__init__()
init_em = [0.5] * embedding_dim
self.em = nn.EmbeddingBag(
num_embeddings,
embedding_dim,
_weight=torch.Tensor([init_em] * num_embeddings),
)
def forward(self, input: torch.Tensor):
gLogger.debug(f"Running RemoteEM.forward() on: {input}")
return self.em(input, offsets=torch.LongTensor(range(input.shape[0])))
# Return a linear module with predefined parameters.
def getLinear(d_in, d_out):
l = nn.Linear(d_in, d_out, bias=False)
w = torch.ones((d_out, d_in))
w[0][0] = -1
w.requires_grad_()
l.weight.data = w
return l
class RemoteNet(nn.Module):
def __init__(self, d_in: int, d_out: int):
gLogger.info(f"Initing RemoteNet with {d_in} {d_out}")
super(RemoteNet, self).__init__()
self.fc = getLinear(d_in, d_out)
self.relu = nn.ReLU()
def forward(self, input: torch.Tensor):
gLogger.debug(f"Running RemoteNet.forward() on: {input}")
return self.relu(self.fc(input))
class HybridModel(nn.Module):
def __init__(
self,
remote_em_rref: rpc.RRef,
remote_net_rref: rpc.RRef,
process_group_for_ddp: dist.ProcessGroup = None,
):
super(HybridModel, self).__init__()
self.remote_em_rref = remote_em_rref
self.remote_net_rref = remote_net_rref
self.fc1 = getLinear(D_DENSE, D_DENSE)
self.fc2 = getLinear(D_HID, D_OUT)
self.non_ddp_params = tuple(self.fc1.parameters()) + tuple(
self.fc2.parameters()
)
self.ddp_params = ()
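        # When a process group is supplied (DdpMode.INSIDE), only fc2 is
        # wrapped in DDP; fc1 and the remote modules stay outside of DDP.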
if process_group_for_ddp is not None:
self.non_ddp_params, self.ddp_params = (
tuple(self.fc1.parameters()),
tuple(self.fc2.parameters()),
)
gLogger.info("Use DDP for the second local net.")
self.fc2 = DistributedDataParallel(
self.fc2,
check_reduction=True,
process_group=process_group_for_ddp,
)
gLogger.info(
f"HybridModel has {len(list(self.parameters()))} groups of parameters."
)
def forward(self, input: FeatureSet):
gLogger.debug(f"Running HybridModel.forward on {input}")
sparse = _remote_method(
RemoteEM.forward, self.remote_em_rref, input.sparse_features
)
# The same size of mini batch.
assert sparse.shape[0] == input.dense_features.shape[0]
dense = self.fc1(input.dense_features)
x = torch.cat((dense, sparse), 1)
gLogger.debug(f"Concatenated feature: {x}")
x = _remote_method(RemoteNet.forward, self.remote_net_rref, x)
return self.fc2(x)
class Trainer:
def __init__(
self,
remote_em_rref: rpc.RRef,
remote_net_rref: rpc.RRef,
ddp_mode: DdpMode,
rank: int,
):
self.rank = rank
self.trainer_group = dist.new_group(TRAINER_RANKS)
self.remote_em_rref = remote_em_rref
self.remote_net_rref = remote_net_rref
self.hybrid_module = HybridModel(
self.remote_em_rref,
self.remote_net_rref,
self.trainer_group
if ddp_mode in (DdpMode.INSIDE,)
else None,
)
self.ddp_params, self.non_ddp_params = (
self.hybrid_module.ddp_params,
self.hybrid_module.non_ddp_params,
)
if ddp_mode == DdpMode.OUTSIDE:
gLogger.info("Wrapping the whole hybrid module into DDP.")
self.ddp_params += self.non_ddp_params
self.non_ddp_params = ()
self.hybrid_module = DistributedDataParallel(
self.hybrid_module,
check_reduction=True,
process_group=self.trainer_group,
)
gLogger.info(
f"Succeeded in creating a HybridModel instance with "
f"{len(self.ddp_params)} ddp params and {len(self.non_ddp_params)} "
f"other local params."
)
def destroy_pg(self):
dist.destroy_process_group(self.trainer_group)
def train_batch(self, mini_batch: FeatureSet):
grads_dict = None
with dist_autograd.context() as context_id:
output = self.hybrid_module.forward(mini_batch)
loss = (output * mini_batch.values).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
gLogger.info(
f"Loss is {loss} for mini batch: {mini_batch}. "
f"Grads dict has {len(grads_dict)} entries: {grads_dict}"
)
return (
tuple(grads_dict[param] for param in self.ddp_params),
tuple(grads_dict[param] for param in self.non_ddp_params),
)
def get_training_examples():
n = 16
training_examples = FeatureSet(
dense_features=torch.zeros((n, D_DENSE)),
sparse_features=torch.zeros(n, dtype=torch.long),
values=torch.zeros(n),
)
idx = 0
# Every example has another one that has exactly the same features but an
# opposite value. Therefore, their grads cancel each other in all-reduce.
for value in (-1, 1):
for x in (-1 * value, 1 * value):
for y in (1 * value, -1 * value):
for z in (0, 1):
training_examples.dense_features[idx, :] = torch.Tensor(
(x, y)
)
training_examples.sparse_features[idx] = z
training_examples.values[idx] = value
idx += 1
# Split the examples among NUM_TRAINERS trainers
assert 0 == (n % NUM_TRAINERS)
examples_per_trainer = int(n / NUM_TRAINERS)
return [
FeatureSet(
dense_features=training_examples.dense_features[
start : start + examples_per_trainer, :
],
sparse_features=training_examples.sparse_features[
start : start + examples_per_trainer
],
values=training_examples.values[
start : start + examples_per_trainer
],
)
for start in range(0, n, examples_per_trainer)
]
shutdown_signal = threading.Condition()
def set_shutdown_signal():
global shutdown_signal
with shutdown_signal:
shutdown_signal.notify()
@unittest.skipIf(
TEST_WITH_ASAN,
"Skip ASAN as torch + multiprocessing spawn have known issues",
)
class TestDdpUnderDistAutograd(MultiProcessTestCase, RpcAgentTestFixture):
@property
def world_size(self) -> int:
return WORLD_SIZE
def remote_worker_name(self) -> str:
# The name has to be consistent with that in 'dist_init' decorator.
return f"worker{REMOTE_WORKER_RANK}"
def trainer_name(self, rank):
# The name has to be consistent with that in 'dist_init' decorator.
return f"worker{rank}"
def setUp(self):
super(TestDdpUnderDistAutograd, self).setUp()
self._spawn_processes()
def tearDown(self):
super(TestDdpUnderDistAutograd, self).tearDown()
def _remote_worker_process(self):
gLogger.info("The remote worker is running.")
dist.init_process_group(
backend="gloo",
init_method="file://{}".format(self.file_name),
world_size=self.world_size,
rank=self.rank)
global shutdown_signal
with shutdown_signal:
shutdown_signal.wait()
gLogger.info("Exiting remote worker.")
dist.destroy_process_group()
def _trainer_process(self, rank: int):
gLogger.info(f"Running the trainer #{rank}...")
gLogger.info(
f"Initing trainer process group by trainer #{rank} with ranks {TRAINER_RANKS}"
)
dist.init_process_group(
backend="gloo",
init_method="file://{}".format(self.file_name),
world_size=self.world_size,
rank=self.rank)
gLogger.info(f"Waiting for shutdown signal on trainer #{rank}...")
global shutdown_signal
with shutdown_signal:
shutdown_signal.wait()
gLogger.info(f"Exiting the trainer #{rank}...")
dist.destroy_process_group()
def _master_process(self, ddp_mode: DdpMode):
gLogger.info("Running the master process...")
dist.init_process_group(
backend="gloo",
init_method="file://{}".format(self.file_name),
world_size=self.world_size,
rank=self.rank)
remote_em_rref = rpc.remote(
self.remote_worker_name(), RemoteEM, args=(NUM_EM_ROW, D_SPARSE)
)
remote_net_rref = rpc.remote(
self.remote_worker_name(),
RemoteNet,
args=(D_DENSE + D_SPARSE, D_HID),
)
gLogger.info("Created remote rrefs on master")
self.do_test_on_master(ddp_mode, remote_em_rref, remote_net_rref)
def do_test_on_master(
self,
ddp_mode: DdpMode,
remote_em_rref: rpc.RRef,
remote_net_rref: rpc.RRef,
):
trainer_rrefs = []
for rank in TRAINER_RANKS:
trainer = self.trainer_name(rank)
trainer_rrefs.append(
rpc.remote(
trainer,
Trainer,
args=(remote_em_rref, remote_net_rref, ddp_mode, rank),
)
)
training_examples = get_training_examples()
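        # Run a few rounds; the examples are constructed so that grads on
        # DDP-managed params cancel out in all-reduce, while grads on local
        # (non-DDP) params stay non-zero.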
for _ in range(3):
futures = []
for idx, trainer_rref in enumerate(trainer_rrefs):
futures.append(
_remote_method_async(
Trainer.train_batch,
trainer_rref,
training_examples[idx],
)
)
for future in futures:
ddp_grads, non_ddp_grads = future.wait()
for grad in ddp_grads:
self.assertEqual(
grad,
torch.zeros_like(grad),
msg="The grad for any ddp parameter should be zeros, because "
"the training examples' grads cancel each other.",
)
for grad in non_ddp_grads:
self.assertNotEqual(
grad,
torch.zeros_like(grad),
msg="The grad for any non-ddp parameter shouldn't be zeros",
)
# Destroy process groups
for idx, trainer_rref in enumerate(trainer_rrefs):
_remote_method_async(
Trainer.destroy_pg,
trainer_rref,
).wait()
# Send shutdown signals.
for rank in TRAINER_RANKS:
trainer = self.trainer_name(rank)
rpc.rpc_sync(trainer, set_shutdown_signal, args=())
rpc.rpc_sync(self.remote_worker_name(), set_shutdown_signal, args=())
def _do_test(self, ddp_mode):
if self.rank == MASTER_RANK:
self._master_process(ddp_mode)
elif self.rank == REMOTE_WORKER_RANK:
self._remote_worker_process()
elif self.rank in TRAINER_RANKS:
self._trainer_process(self.rank)
else:
raise RuntimeError(f"Unknow process rank: {self.rank}")
@requires_gloo()
@dist_init
def test_backward_no_ddp(self):
self._do_test(DdpMode.NONE)
@requires_gloo()
@dist_init
def test_backward_ddp_outside(self):
self._do_test(DdpMode.OUTSIDE)
@requires_gloo()
@dist_init
def test_backward_ddp_inside(self):
self._do_test(DdpMode.INSIDE)
@unittest.skipIf(
TEST_WITH_ASAN,
"Skip ASAN as torch + multiprocessing spawn have known issues",
)
class TestDdpComparison(MultiProcessTestCase, RpcAgentTestFixture):
@property
def world_size(self) -> int:
return NUM_TRAINERS
def trainer_name(self, rank):
# The name has to be consistent with that in 'dist_init' decorator.
return f"worker{rank}"
def setUp(self):
super(TestDdpComparison, self).setUp()
self._spawn_processes()
def tearDown(self):
super(TestDdpComparison, self).tearDown()
@requires_gloo()
@dist_init
def test_ddp_comparison(self):
gLogger.info(f"Running trainer rank: {self.rank}")
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method="file://{}".format(self.file_name),
world_size=self.world_size,
rank=self.rank)
net = nn.Linear(2, 3)
ddp_net = DistributedDataParallel(
net
)
inputs = torch.rand((3, 2))
# Use distributed autograd. The gradients will be in RPC context map.
grads_dict = {}
with dist_autograd.context() as context_id:
loss = ddp_net(inputs).norm()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
gLogger.info(f"Trainer #{self.rank} got grad dict: {grads_dict}")
# Use local autograd. The gradients will be in each variable's '.grad'.
loss = ddp_net(inputs).norm()
loss.backward()
# The gradients should be the same
for param in net.parameters():
self.assertTrue(
param in grads_dict,
msg=f"Param {param} is not in dist_auto grad dict {grads_dict}",
)
self.assertEqual(
grads_dict[param],
param.grad,
msg=f"The grads for param {param} are different under local "
f"and dist autograd: {param.grad} \n---\n {grads_dict[param]}",
)
dist.destroy_process_group()
@requires_gloo()
@dist_init
def test_ddp_dist_autograd_sparse_grads(self):
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method="file://{}".format(self.file_name),
world_size=self.world_size,
rank=self.rank)
model = nn.EmbeddingBag(10, 3, sparse=True)
ddp_model = DistributedDataParallel(model)
# Different inputs for each
input = torch.LongTensor(10).random_(0, 10)
offsets = torch.LongTensor([0, 4])
# Run local.
loss = ddp_model(input, offsets).sum()
loss.backward()
with dist_autograd.context() as context_id:
loss = ddp_model(input, offsets).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
self.assertEqual(1, len(grads_dict))
self.assertEqual(model.weight.grad, grads_dict[model.weight])
@staticmethod
def get_remote_grads(rref, context_id):
return dist_autograd.get_gradients(context_id)[rref.local_value().weight]
@requires_gloo()
@dist_init
def test_ddp_dist_autograd_local_vs_remote(self):
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method="file://{}".format(self.file_name),
world_size=self.world_size,
rank=self.rank)
remote_layer1 = RemoteModule("worker0", nn.Linear, args=(10, 5, False))
layer1 = nn.Linear(10, 5, False)
# Start with the same parameters for remote and local
layer1.weight = remote_layer1.module_rref.to_here().weight
# Run local case.
layer2 = nn.Linear(5, 1)
inputs = torch.rand((10, 10))
ddp_model = DistributedDataParallel(layer2)
loss = ddp_model(layer1(inputs)).sum()
loss.backward()
# Run remote case.
with dist_autograd.context() as context_id:
loss = ddp_model(remote_layer1(inputs)).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
dist.barrier()
self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
self.assertEqual(
layer1.weight.grad,
rpc.rpc_sync(
"worker0",
TestDdpComparison.get_remote_grads,
args=(remote_layer1.module_rref, context_id)
)
)
@skip_if_lt_x_gpu(NUM_TRAINERS)
@requires_nccl()
@dist_init
def test_ddp_dist_autograd_local_vs_remote_gpu(self):
# Each trainer uses a different random seed. Otherwise, they are going
# to have exactly the same initial model parameters, input, and
# therefore grads. That means the grads will be the same before and
# after DDP's all-reduce.
torch.manual_seed(self.rank)
dist.init_process_group(
backend="gloo",
init_method="file://{}".format(self.file_name),
world_size=self.world_size,
rank=self.rank)
remote_layer1 = RemoteModule("worker0", nn.Linear, args=(10, 7, False))
layer1 = nn.Linear(10, 7, False)
# Start with the same parameters for remote and local
layer1.weight = remote_layer1.module_rref.to_here().weight
layer2 = nn.Linear(7, 5).cuda(self.rank)
ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank])
remote_layer3 = RemoteModule("worker0", nn.Linear, args=(5, 3, False))
layer3 = nn.Linear(5, 3, False)
# Start with the same parameters for remote and local
layer3.weight = remote_layer3.module_rref.to_here().weight
layer4 = nn.Linear(3, 1).cuda(self.rank)
ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank])
# Run local case.
inputs = torch.rand((10, 10))
loss = ddp_layer4(
layer3(
ddp_layer2(
layer1(inputs).cuda(self.rank)
).cpu()
).cuda(self.rank)
).sum()
loss.backward()
# Run remote case.
with dist_autograd.context() as context_id:
loss = ddp_layer4(
remote_layer3(
ddp_layer2(
remote_layer1(inputs).cuda(self.rank)
).cpu()
).cuda(self.rank)
).sum()
dist_autograd.backward(context_id, [loss])
grads_dict = dist_autograd.get_gradients(context_id)
dist.barrier()
self.assertEqual(
layer1.weight.grad,
rpc.rpc_sync(
"worker0",
TestDdpComparison.get_remote_grads,
args=(remote_layer1.module_rref, context_id)
)
)
self.assertEqual(layer2.weight.grad, grads_dict[layer2.weight])
self.assertEqual(
layer3.weight.grad,
rpc.rpc_sync(
"worker0",
TestDdpComparison.get_remote_grads,
args=(remote_layer3.module_rref, context_id)
)
)
self.assertEqual(layer4.weight.grad, grads_dict[layer4.weight])
| 35.179641 | 108 | 0.58783 |
794435f64a955dfb2d8aec866dbec6b76a68c557 | 2,683 | py | Python | capsul/study_config/config_modules/attributes_config.py | Dralehcab/capsul | cc27fe74706aeb646ac54244a14e432caacee3c7 | [
"CECILL-B"
] | null | null | null | capsul/study_config/config_modules/attributes_config.py | Dralehcab/capsul | cc27fe74706aeb646ac54244a14e432caacee3c7 | [
"CECILL-B"
] | null | null | null | capsul/study_config/config_modules/attributes_config.py | Dralehcab/capsul | cc27fe74706aeb646ac54244a14e432caacee3c7 | [
"CECILL-B"
] | null | null | null | ##########################################################################
# CAPSUL - Copyright (C) CEA, 2016
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
import os
import six
from traits.api import Bool, Str, Undefined, List, DictStrStr
from capsul.study_config.study_config import StudyConfigModule
from capsul.attributes.attributes_factory import AttributesFactory
from capsul.attributes.attributes_schema import AttributesSchema, \
ProcessAttributes
from capsul.attributes.completion_engine \
import ProcessCompletionEngineFactory, PathCompletionEngineFactory
class AttributesConfig(StudyConfigModule):
'''Attributes-based completion configuration module for StudyConfig
'''
dependencies = []
def __init__(self, study_config, configuration):
super(AttributesConfig, self).__init__(study_config, configuration)
default_paths = ['capsul.attributes.completion_engine_factory']
self.study_config.add_trait(
'attributes_schema_paths',
List(default_paths, Str(Undefined, output=False), output=False,
                 desc='attributes schema module names'))
self.study_config.add_trait(
'attributes_schemas',
DictStrStr(output=False,
                       desc='attributes schema names'))
self.study_config.add_trait(
'process_completion',
Str('builtin', output=False,
desc='process completion model name'))
self.study_config.add_trait(
'path_completion',
Str(Undefined, output=False,
desc='path completion model name'))
self.study_config.modules_data.attributes_factory = AttributesFactory()
def initialize_module(self):
        '''Register the schema, completion-engine and process-attributes
        classes with the attributes factory and point it at the configured
        schema module paths.
        '''
factory = self.study_config.modules_data.attributes_factory
factory.class_types['schema'] = AttributesSchema
factory.class_types['process_completion'] \
= ProcessCompletionEngineFactory
factory.class_types['path_completion'] \
= PathCompletionEngineFactory
factory.class_types['process_attributes'] \
= ProcessAttributes
factory.module_path = self.study_config.attributes_schema_paths
def initialize_callbacks(self):
self.study_config.on_trait_change(
self.initialize_module,
['attributes_schemas', 'process_completion',
             'path_completion', 'attributes_schema_paths'])
| 38.884058 | 79 | 0.660082 |
7944360d9f8a57981506c28d51d154c39dffc601 | 348 | py | Python | extra/game-mechanics/platform-game-project/Listing28_ScreenPosition.py | PythonBiellaGroup/LearningPythonWithGames | 4dbcff5b55f7f19d09127c7ef7dd04791d9cad96 | [
"MIT"
] | 2 | 2021-03-20T14:54:00.000Z | 2021-06-16T19:10:52.000Z | extra/game-mechanics/platform-game-project/Listing28_ScreenPosition.py | PythonBiellaGroup/LearningPythonWithGames | 4dbcff5b55f7f19d09127c7ef7dd04791d9cad96 | [
"MIT"
] | null | null | null | extra/game-mechanics/platform-game-project/Listing28_ScreenPosition.py | PythonBiellaGroup/LearningPythonWithGames | 4dbcff5b55f7f19d09127c7ef7dd04791d9cad96 | [
"MIT"
] | null | null | null |
def ScreenPositionUpdate(player, screenPosition):
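    # Scroll so that the player's centre stays at least 150 px from either
    # edge of the view (assumed to be 600 px tall), never scrolling past 0.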
if player.centre[1] - screenPosition < 150:
screenPosition = player.centre[1] - 150
elif player.centre[1] - screenPosition > 600 - 150:
screenPosition = player.centre[1] - (600 - 150)
if screenPosition > 0:
screenPosition = 0
return screenPosition
| 31.636364 | 55 | 0.652299 |
794436220fbc8c74dfbc3f5d5e20d2ec9df6f070 | 30,337 | py | Python | DSSP_statistics.py | sutormin94/N-to-C_asymmetry | ed4f9d6ed6c202a46d619d9ebec097134ee04985 | [
"MIT"
] | null | null | null | DSSP_statistics.py | sutormin94/N-to-C_asymmetry | ed4f9d6ed6c202a46d619d9ebec097134ee04985 | [
"MIT"
] | null | null | null | DSSP_statistics.py | sutormin94/N-to-C_asymmetry | ed4f9d6ed6c202a46d619d9ebec097134ee04985 | [
"MIT"
] | null | null | null | ###############################################
##Dmitry Sutormin, 2020##
##N-to-C-terminus asymmetry in protein domain secondary structure element composition##
#Parses DSSP output generated for representative structures with Run_DSSP.py.
#Gets statistics of secondary structure elements at N- and C-termini of protein domains.
###############################################
#######
#Packages to be imported.
#######
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as st
#######
#Variables to be defined.
#######
#Path to DSSP data file.
DSSP_data_inpath="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Documents\Skoltech_PhD\Structural_bioinformatics\Project\DSSP\DSSP\DSSP_Representative_pdb_domains.txt"
#Path to output file.
DSSP_data_outpath="C:\\Users\sutor\OneDrive\ThinkPad_working\Sutor\Documents\Skoltech_PhD\Structural_bioinformatics\Project\DSSP\\"
#######
#Read DSSP output.
#######
def read_dssp_data(DSSP_inpath):
#Read DSSP data keep in dictionary.
DSSP_data_dict={}
filein=open(DSSP_inpath, 'r')
for line in filein:
line=line.rstrip().split('\t')
if line[0] not in ['PFAM_id']:
PFAM_id=line[0]
PDB_id=line[1]
Chain_id=line[2]
Start=int(line[3])
End=int(line[4])
Resolution=float(line[5])
SSE_string=line[6]
Phi_list=[float(x) for x in line[7].lstrip('[').rstrip(']').split(', ')]
Psi_list=[float(x) for x in line[8].lstrip('[').rstrip(']').split(', ')]
DSSP_data_dict[PFAM_id]=[PDB_id, Chain_id, Start, End, Resolution, SSE_string, Phi_list, Psi_list]
filein.close()
return DSSP_data_dict
#######
#Filter data, split data by domain length.
#######
def define_length_groups(DSSP_data_dict, min_len, thr_len):
Domain_length_ar=[]
Discarded_short_structures={}
Short_structures={}
Long_structures={}
for pfam_id, dssp_data in DSSP_data_dict.items():
domain_len=len(dssp_data[5])
Domain_length_ar.append(domain_len)
#Classify domains by length.
if domain_len<min_len:
Discarded_short_structures[pfam_id]=dssp_data
elif min_len<=domain_len<thr_len:
Short_structures[pfam_id]=dssp_data
elif domain_len>=thr_len:
Long_structures[pfam_id]=dssp_data
#Plot domain length distribution.
fig, plot=plt.subplots(1,1,figsize=(3,3), dpi=100)
plot.hist(Domain_length_ar, bins=50, rwidth=0.85, color='#94fff1', edgecolor='black', linewidth=0.1)
plot.axvline(x=min_len, ls='--', linewidth=0.7, color='black')
plot.axvline(x=thr_len, ls='--', linewidth=0.7)
plot.set_xlabel('Domain length, aa')
plot.set_ylabel('Number of structures')
plot.set_yscale('log')
plot.set_title('Representative structures\nafter DSSP')
plot.annotate(f'Number of\nstructures\n{len(Domain_length_ar)}', (0.5,0.7), xycoords='axes fraction')
plot.annotate(f'x<{min_len} : {len(Discarded_short_structures)}', (0.5,0.6), xycoords='axes fraction', size=7)
plot.annotate(f'{min_len}<=x<{thr_len} : {len(Short_structures)}', (0.5,0.5), xycoords='axes fraction', size=7)
plot.annotate(f'x>={thr_len} : {len(Long_structures)}', (0.5,0.4), xycoords='axes fraction', size=7)
plt.tight_layout()
plt.show()
return Short_structures, Long_structures
#######
#Take phi, psi angles for N- and C-termini.
#######
def phi_psi_N_to_C(structures_dict, window_width):
phi_N=[]
phi_C=[]
psi_N=[]
psi_C=[]
phi=[]
psi=[]
for pfam_id, dssp_data in structures_dict.items():
phi_list=dssp_data[6]
psi_list=dssp_data[7]
phi_N+=phi_list[:window_width]
phi_C+=phi_list[-window_width:]
psi_N+=psi_list[:window_width]
psi_C+=psi_list[-window_width:]
phi+=phi_list
psi+=psi_list
return phi_N, phi_C, psi_N, psi_C, phi, psi
#######
#Create secondary structure element frequency matrix.
#######
def ss_element_frequency_matrix(structures_dict, window_width):
#Create positioned matrix of secondary structure elements.
ss_matrix_N=[]
ss_matrix_C=[]
for i in range(window_width):
column_N=[]
column_C=[]
for pfam_id, dssp_data in structures_dict.items():
SSE_string=dssp_data[5]
column_N.append(SSE_string[i])
column_C.append(SSE_string[-window_width+i])
ss_matrix_N.append(column_N)
ss_matrix_C.append(column_C)
#Create position frequency matrix of secondary structure elements.
DSSP_alphabet=['H', 'B', 'E', 'G', 'I', 'T', 'S', '-']
ss_pfm_N={}
ss_pfm_C={}
ss_pfm_conf_N={}
ss_pfm_conf_C={}
for letter_code in DSSP_alphabet:
        #Keep frequencies of ss elements.
        frequency_row_N=[]
        frequency_row_C=[]
        #Keep boundaries of the 95% confidence interval.
conf_upper_N=[]
conf_upper_C=[]
conf_lower_N=[]
conf_lower_C=[]
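        #For each alignment position: fraction of structures carrying this SSE
        #code, plus a 95% binomial confidence interval on that fraction.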
for i in range(len(ss_matrix_N)):
column_letter_freq_N=ss_matrix_N[i].count(letter_code)/float(len(ss_matrix_N[i]))
confident_interval_N=st.binom.interval(0.95, len(ss_matrix_N[i]), column_letter_freq_N, loc=0)
lower_N=confident_interval_N[0]/len(ss_matrix_N[i])
upper_N=confident_interval_N[1]/len(ss_matrix_N[i])
conf_lower_N.append(lower_N)
conf_upper_N.append(upper_N)
frequency_row_N.append(column_letter_freq_N)
column_letter_freq_C=ss_matrix_C[i].count(letter_code)/float(len(ss_matrix_C[i]))
confident_interval_C=st.binom.interval(0.95, len(ss_matrix_C[i]), column_letter_freq_C, loc=0)
lower_C=confident_interval_C[0]/len(ss_matrix_C[i])
upper_C=confident_interval_C[1]/len(ss_matrix_C[i])
conf_lower_C.append(lower_C)
conf_upper_C.append(upper_C)
frequency_row_C.append(column_letter_freq_C)
ss_pfm_N[letter_code]=frequency_row_N
ss_pfm_C[letter_code]=frequency_row_C
ss_pfm_conf_N[letter_code+'_upper']=conf_upper_N
ss_pfm_conf_N[letter_code+'_lower']=conf_lower_N
ss_pfm_conf_C[letter_code+'_upper']=conf_upper_C
ss_pfm_conf_C[letter_code+'_lower']=conf_lower_C
print(ss_pfm_N)
print(ss_pfm_C)
return ss_matrix_N, ss_matrix_C, ss_pfm_N, ss_pfm_C, ss_pfm_conf_N, ss_pfm_conf_C
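#Note (added for clarity): ss_matrix_N/ss_matrix_C hold, for each of the window_width positions, the
#column of DSSP codes observed across all structures. ss_pfm_N/ss_pfm_C map each DSSP code to its
#per-position frequency, e.g. ss_pfm_N['H'][0] is the alpha-helix frequency at the first N-terminal
#position. ss_pfm_conf_* store the lower/upper bounds of the 95% binomial confidence interval on
#those frequencies (st.binom.interval on counts, divided by the number of structures).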
#######
#Enrichment of secondary structure elements at the N- over the C-terminus.
#######
def ss_ele_enrichment(ss_pfm_N, ss_pfm_C):
enrichment_N_to_C_dict={}
for letter_code, frequency_row_N in ss_pfm_N.items():
frequency_N=np.array(frequency_row_N)
frequency_C=np.array(ss_pfm_C[letter_code][::-1])
enrichment_N_to_C=frequency_N/frequency_C
enrichment_N_to_C_dict[letter_code]=enrichment_N_to_C
return enrichment_N_to_C_dict
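#Note (added for clarity): ss_pfm_C is reversed with [::-1] so that index i compares the i-th residue
#from the N-terminus with the i-th residue from the C-terminus. The element-wise division yields inf
#(or nan for 0/0) wherever the C-terminal frequency is zero; numpy typically emits a runtime warning
#rather than raising, so downstream plots may show missing or extreme values at such positions.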
#######
#Relation between terminal secondary structure elements: do they co-occur or occur independently?
#######
def termini_dependance(ss_matrix_N, ss_matrix_C, ss_pfm_N, ss_pfm_C, local_window_width):
DSSP_alphabet=['H', 'B', 'E', 'G', 'I', 'T', 'S', '-']
#Calculate observed frequencies of element co-occurrence, coordinate-wise.
Observed_matrices_dict={}
for letter_code_1 in DSSP_alphabet:
for letter_code_2 in DSSP_alphabet:
Ni_Cj_freq_2d_ar=[]
for Ni in range(local_window_width):
Cj_freq_1d_ar=[]
for Cj in range(local_window_width):
N_column_i=ss_matrix_N[Ni]
C_column_j=ss_matrix_C[-Cj-1]
Ni_Cj_counts=0
for structure_k in range(len(N_column_i)):
if (N_column_i[structure_k]==letter_code_1) and (C_column_j[structure_k]==letter_code_2):
Ni_Cj_counts+=1
Ni_Cj_frequency=Ni_Cj_counts/float(len(N_column_i))
Cj_freq_1d_ar.append(Ni_Cj_frequency)
Ni_Cj_freq_2d_ar.append(Cj_freq_1d_ar)
Ni_Cj_freq_2d_ar_np=np.array(Ni_Cj_freq_2d_ar)
Observed_matrices_dict[letter_code_1+letter_code_2]=Ni_Cj_freq_2d_ar_np
#Calculate expected frequencies of element co-occurrence, coordinate-wise (assuming independent termini).
Expected_matrices_dict={}
for letter_code_1 in DSSP_alphabet:
for letter_code_2 in DSSP_alphabet:
Expected_Ni_Cj_freq_2d_ar=[]
for Ni in range(local_window_width):
Expected_Cj_freq_1d_ar=[]
for Cj in range(local_window_width):
Ni_frequency=ss_pfm_N[letter_code_1][Ni]
Cj_frequency=ss_pfm_C[letter_code_2][-Cj-1]
Expected_Ni_Cj_frequency=Ni_frequency*Cj_frequency
Expected_Cj_freq_1d_ar.append(Expected_Ni_Cj_frequency)
Expected_Ni_Cj_freq_2d_ar.append(Expected_Cj_freq_1d_ar)
Expected_Ni_Cj_freq_2d_ar_np=np.array(Expected_Ni_Cj_freq_2d_ar)
Expected_matrices_dict[letter_code_1+letter_code_2]=Expected_Ni_Cj_freq_2d_ar_np
#Calculate the observed-over-expected ratio of element co-occurrence frequencies, coordinate-wise.
Obs_over_exp_matrices_dict={}
for letter_code_1 in DSSP_alphabet:
for letter_code_2 in DSSP_alphabet:
Obs_over_exp_freq_matrix=np.divide(Observed_matrices_dict[letter_code_1+letter_code_2], Expected_matrices_dict[letter_code_1+letter_code_2])
Obs_over_exp_matrices_dict[letter_code_1+letter_code_2]=Obs_over_exp_freq_matrix
print(letter_code_1, letter_code_2, Obs_over_exp_freq_matrix)
return Obs_over_exp_matrices_dict
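#Note (added for clarity): each Obs_over_exp matrix is indexed by (position from the N-terminus,
#position from the C-terminus) for one ordered pair of DSSP codes. A ratio above 1 means the two
#elements co-occur at that pair of positions more often than expected if the termini were independent;
#a ratio below 1 means less often.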
#######
#Plot the distribution of secondary structure element frequencies.
#######
def distribution_of_frequences(ss_pfm_N, ss_pfm_C):
ss_pfm=ss_pfm_N+ss_pfm_C
print(len(ss_pfm))
fig, plot=plt.subplots(1,1,figsize=(4,4), dpi=100)
weights=np.ones_like(ss_pfm)/(len(ss_pfm)) #Taken from https://stackoverflow.com/questions/42481698/probability-density-histogram-with-matplotlib-doesnt-make-sense
plot.hist(ss_pfm, bins=10, weights=weights)
plot.set_xlabel('Frequency of ss element')
plot.set_ylabel('Fraction of positions')
plt.show()
return
#######
#Analyse N-to-C asymmetry in secondary structure element frequencies.
#######
def N_to_C_asymmetry(ss_pfm_N_short, ss_pfm_C_short, ss_pfm_conf_N_short, ss_pfm_conf_C_short, ss_pfm_N_long, ss_pfm_C_long, ss_pfm_conf_N_long, ss_pfm_conf_C_long, window_width):
##Plot distribution of frequencies, get confidence interval.
##Distributions are far from normal, so this estimate is unreliable -> deprecated.
#distribution_of_frequences(ss_pfm_N_short['H'], ss_pfm_C_short['H'])
#distribution_of_frequences(ss_pfm_N_long['H'], ss_pfm_C_long['H'])
#Plot frequency of ss elements as a function of distance from N- and C-termini.
X_N=range(window_width)
X_C=range(60, 60+window_width)
xticks_ar=list(range(0,111,10))
xticklabels_ar=[0,10,20,30,40,50,-50,-40,-30,-20,-10,0]
xticks_spec=[-4, 114]
xticklabels_spec=['N-', '-C']
fig, plot=plt.subplots(2,2,figsize=(12,6), dpi=100)
plot[0,0].plot(X_N, ss_pfm_N_short['H'], color='#94fff1', linewidth=2, label=r'$\alpha$-helix short domains')
plot[0,0].fill_between(X_N, ss_pfm_conf_N_short['H_lower'], ss_pfm_conf_N_short['H_upper'], color='#94fff1', alpha=0.3, linewidth=0)
plot[0,0].plot(X_C, ss_pfm_C_short['H'], color='#94fff1', linewidth=2)
plot[0,0].fill_between(X_C, ss_pfm_conf_C_short['H_lower'], ss_pfm_conf_C_short['H_upper'], color='#94fff1', alpha=0.3, linewidth=0)
plot[0,0].plot(X_N, ss_pfm_N_long['H'], color='#ab658c', linewidth=2, label=r'$\alpha$-helix long domains')
plot[0,0].fill_between(X_N, ss_pfm_conf_N_long['H_lower'], ss_pfm_conf_N_long['H_upper'], color='#ab658c', alpha=0.3, linewidth=0)
plot[0,0].plot(X_C, ss_pfm_C_long['H'], color='#ab658c', linewidth=2)
plot[0,0].fill_between(X_C, ss_pfm_conf_C_long['H_lower'], ss_pfm_conf_C_long['H_upper'], color='#ab658c', alpha=0.3, linewidth=0)
plot[0,0].plot(X_N, ss_pfm_N_short['E'], color='#59acff', linewidth=2, label=r'$\beta$-strand short domains')
plot[0,0].fill_between(X_N, ss_pfm_conf_N_short['E_lower'], ss_pfm_conf_N_short['E_upper'], color='#59acff', alpha=0.3, linewidth=0)
plot[0,0].plot(X_C, ss_pfm_C_short['E'], color='#59acff', linewidth=2)
plot[0,0].fill_between(X_C, ss_pfm_conf_C_short['E_lower'], ss_pfm_conf_C_short['E_upper'], color='#59acff', alpha=0.3, linewidth=0)
plot[0,0].plot(X_N, ss_pfm_N_long['E'], color='#ffec59', linewidth=2, label=r'$\beta$-strand long domains')
plot[0,0].fill_between(X_N, ss_pfm_conf_N_long['E_lower'], ss_pfm_conf_N_long['E_upper'], color='#ffec59', alpha=0.3, linewidth=0)
plot[0,0].plot(X_C, ss_pfm_C_long['E'], color='#ffec59', linewidth=2)
plot[0,0].fill_between(X_C, ss_pfm_conf_C_long['E_lower'], ss_pfm_conf_C_long['E_upper'], color='#ffec59', alpha=0.3, linewidth=0)
plot[0,0].set_xticks(xticks_ar)
plot[0,0].set_xticklabels(xticklabels_ar)
plot[0,0].set_xticks(xticks_spec, minor=True)
plot[0,0].set_xticklabels(xticklabels_spec, minor=True)
plot[0,0].tick_params(axis='x', which='minor', length=0)
plot[0,0].set_xlabel('Distance, aa')
plot[0,0].set_ylabel('Frequency')
plot[0,0].legend(fontsize=8.5, ncol=2, handlelength=0.7, frameon=False, columnspacing=0.7, loc='upper center')
plot[0,1].plot(X_N, ss_pfm_N_short['B'], color='#94fff1', linewidth=2, label=r'$\beta$-bridge short domains')
plot[0,1].fill_between(X_N, ss_pfm_conf_N_short['B_lower'], ss_pfm_conf_N_short['B_upper'], color='#94fff1', alpha=0.3, linewidth=0)
plot[0,1].plot(X_C, ss_pfm_C_short['B'], color='#94fff1', linewidth=2)
plot[0,1].fill_between(X_C, ss_pfm_conf_C_short['B_lower'], ss_pfm_conf_C_short['B_upper'], color='#94fff1', alpha=0.3, linewidth=0)
plot[0,1].plot(X_N, ss_pfm_N_long['B'], color='#ab658c', linewidth=2, label=r'$\beta$-bridge long domains')
plot[0,1].fill_between(X_N, ss_pfm_conf_N_long['B_lower'], ss_pfm_conf_N_long['B_upper'], color='#ab658c', alpha=0.3, linewidth=0)
plot[0,1].plot(X_C, ss_pfm_C_long['B'], color='#ab658c', linewidth=2)
plot[0,1].fill_between(X_C, ss_pfm_conf_C_long['B_lower'], ss_pfm_conf_C_long['B_upper'], color='#ab658c', alpha=0.3, linewidth=0)
plot[0,1].plot(X_N, ss_pfm_N_short['G'], color='#59acff', linewidth=2, label=r'3-10-helix short domains')
plot[0,1].fill_between(X_N, ss_pfm_conf_N_short['G_lower'], ss_pfm_conf_N_short['G_upper'], color='#59acff', alpha=0.3, linewidth=0)
plot[0,1].plot(X_C, ss_pfm_C_short['G'], color='#59acff', linewidth=2)
plot[0,1].fill_between(X_C, ss_pfm_conf_C_short['G_lower'], ss_pfm_conf_C_short['G_upper'], color='#59acff', alpha=0.3, linewidth=0)
plot[0,1].plot(X_N, ss_pfm_N_long['G'], color='#ffec59', linewidth=2, label=r'3-10-helix long domains')
plot[0,1].fill_between(X_N, ss_pfm_conf_N_long['G_lower'], ss_pfm_conf_N_long['G_upper'], color='#ffec59', alpha=0.3, linewidth=0)
plot[0,1].plot(X_C, ss_pfm_C_long['G'], color='#ffec59', linewidth=2)
plot[0,1].fill_between(X_C, ss_pfm_conf_C_long['G_lower'], ss_pfm_conf_C_long['G_upper'], color='#ffec59', alpha=0.3, linewidth=0)
plot[0,1].set_xticks(xticks_ar)
plot[0,1].set_xticklabels(xticklabels_ar)
plot[0,1].set_xticks(xticks_spec, minor=True)
plot[0,1].set_xticklabels(xticklabels_spec, minor=True)
plot[0,1].tick_params(axis='x', which='minor', length=0)
plot[0,1].set_xlabel('Distance, aa')
plot[0,1].set_ylabel('Frequency')
plot[0,1].legend(fontsize=8.5, ncol=2, handlelength=0.7, frameon=False, columnspacing=0.7, loc='center')
plot[1,0].plot(X_N, ss_pfm_N_short['I'], color='#94fff1', linewidth=2, label=r'$\pi$-helix short domains')
plot[1,0].fill_between(X_N, ss_pfm_conf_N_short['I_lower'], ss_pfm_conf_N_short['I_upper'], color='#94fff1', alpha=0.3, linewidth=0)
plot[1,0].plot(X_C, ss_pfm_C_short['I'], color='#94fff1', linewidth=2)
plot[1,0].fill_between(X_C, ss_pfm_conf_C_short['I_lower'], ss_pfm_conf_C_short['I_upper'], color='#94fff1', alpha=0.3, linewidth=0)
plot[1,0].plot(X_N, ss_pfm_N_long['I'], color='#ab658c', linewidth=2, label=r'$\pi$-helix long domains')
plot[1,0].fill_between(X_N, ss_pfm_conf_N_long['I_lower'], ss_pfm_conf_N_long['I_upper'], color='#ab658c', alpha=0.3, linewidth=0)
plot[1,0].plot(X_C, ss_pfm_C_long['I'], color='#ab658c', linewidth=2)
plot[1,0].fill_between(X_C, ss_pfm_conf_C_long['I_lower'], ss_pfm_conf_C_long['I_upper'], color='#ab658c', alpha=0.3, linewidth=0)
plot[1,0].plot(X_N, ss_pfm_N_short['T'], color='#59acff', linewidth=2, label=r'turn short domains')
plot[1,0].fill_between(X_N, ss_pfm_conf_N_short['T_lower'], ss_pfm_conf_N_short['T_upper'], color='#59acff', alpha=0.3, linewidth=0)
plot[1,0].plot(X_C, ss_pfm_C_short['T'], color='#59acff', linewidth=2)
plot[1,0].fill_between(X_C, ss_pfm_conf_C_short['T_lower'], ss_pfm_conf_C_short['T_upper'], color='#59acff', alpha=0.3, linewidth=0)
plot[1,0].plot(X_N, ss_pfm_N_long['T'], color='#ffec59', linewidth=2, label=r'turn long domains')
plot[1,0].fill_between(X_N, ss_pfm_conf_N_long['T_lower'], ss_pfm_conf_N_long['T_upper'], color='#ffec59', alpha=0.3, linewidth=0)
plot[1,0].plot(X_C, ss_pfm_C_long['T'], color='#ffec59', linewidth=2)
plot[1,0].fill_between(X_C, ss_pfm_conf_C_long['T_lower'], ss_pfm_conf_C_long['T_upper'], color='#ffec59', alpha=0.3, linewidth=0)
plot[1,0].set_xticks(xticks_ar)
plot[1,0].set_xticklabels(xticklabels_ar)
plot[1,0].set_xticks(xticks_spec, minor=True)
plot[1,0].set_xticklabels(xticklabels_spec, minor=True)
plot[1,0].tick_params(axis='x', which='minor', length=0)
plot[1,0].set_xlabel('Distance, aa')
plot[1,0].set_ylabel('Frequency')
plot[1,0].legend(fontsize=8.5, ncol=2, handlelength=0.7, frameon=False, columnspacing=0.7, loc='center')
plot[1,1].plot(X_N, ss_pfm_N_short['S'], color='#94fff1', linewidth=2, label=r'bend short domains')
plot[1,1].fill_between(X_N, ss_pfm_conf_N_short['S_lower'], ss_pfm_conf_N_short['S_upper'], color='#94fff1', alpha=0.3, linewidth=0)
plot[1,1].plot(X_C, ss_pfm_C_short['S'], color='#94fff1', linewidth=2)
plot[1,1].fill_between(X_C, ss_pfm_conf_C_short['S_lower'], ss_pfm_conf_C_short['S_upper'], color='#94fff1', alpha=0.3, linewidth=0)
plot[1,1].plot(X_N, ss_pfm_N_long['S'], color='#ab658c', linewidth=2, label=r'bend long domains')
plot[1,1].fill_between(X_N, ss_pfm_conf_N_long['S_lower'], ss_pfm_conf_N_long['S_upper'], color='#ab658c', alpha=0.3, linewidth=0)
plot[1,1].plot(X_C, ss_pfm_C_long['S'], color='#ab658c', linewidth=2)
plot[1,1].fill_between(X_C, ss_pfm_conf_C_long['S_lower'], ss_pfm_conf_C_long['S_upper'], color='#ab658c', alpha=0.3, linewidth=0)
plot[1,1].plot(X_N, ss_pfm_N_short['-'], color='#59acff', linewidth=2, label=r'unstructured short domains')
plot[1,1].fill_between(X_N, ss_pfm_conf_N_short['-_lower'], ss_pfm_conf_N_short['-_upper'], color='#59acff', alpha=0.3, linewidth=0)
plot[1,1].plot(X_C, ss_pfm_C_short['-'], color='#59acff', linewidth=2)
plot[1,1].fill_between(X_C, ss_pfm_conf_C_short['-_lower'], ss_pfm_conf_C_short['-_upper'], color='#59acff', alpha=0.3, linewidth=0)
plot[1,1].plot(X_N, ss_pfm_N_long['-'], color='#ffec59', linewidth=2, label=r'unstructured long domains')
plot[1,1].fill_between(X_N, ss_pfm_conf_N_long['-_lower'], ss_pfm_conf_N_long['-_upper'], color='#ffec59', alpha=0.3, linewidth=0)
plot[1,1].plot(X_C, ss_pfm_C_long['-'], color='#ffec59', linewidth=2)
plot[1,1].fill_between(X_C, ss_pfm_conf_C_long['-_lower'], ss_pfm_conf_C_long['-_upper'], color='#ffec59', alpha=0.3, linewidth=0)
plot[1,1].set_xticks(xticks_ar)
plot[1,1].set_xticklabels(xticklabels_ar)
plot[1,1].set_xticks(xticks_spec, minor=True)
plot[1,1].set_xticklabels(xticklabels_spec, minor=True)
plot[1,1].tick_params(axis='x', which='minor', length=0)
plot[1,1].set_xlabel('Distance, aa')
plot[1,1].set_ylabel('Frequency')
plot[1,1].legend(fontsize=8.5, ncol=2, handlelength=0.7, frameon=False, columnspacing=0.7, loc='center')
plt.tight_layout()
plt.show()
return
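#Note on N_to_C_asymmetry above (added for clarity): the N-terminal window is plotted at
#x=0..window_width-1 and the C-terminal window at x=60..60+window_width-1, so the two termini appear
#side by side with a gap in between; the minor tick labels 'N-' and '-C' mark the outer ends.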
#######
#Plot enrichment of ss elements at the N-terminus over the C-terminus.
#######
def N_to_C_enrichment(enrichment_N_to_C_dict_short, enrichment_N_to_C_dict_long, window_width):
#Plot enrichment of ss elements as a function of distance from termini.
X=range(window_width)
xticks_ar=list(range(0,51,10))
xticklabels_ar=[0,10,20,30,40,50]
xticks_spec=[-4]
xticklabels_spec=['N-\nC-', ]
fig, plot=plt.subplots(2,2,figsize=(12,6), dpi=100)
plot[0,0].plot(X, enrichment_N_to_C_dict_short['H'], color='#94fff1', linewidth=2, label=r'$\alpha$-helix short domains')
plot[0,0].plot(X, enrichment_N_to_C_dict_long['H'], color='#ab658c', linewidth=2, label=r'$\alpha$-helix long domains')
plot[0,0].plot(X, enrichment_N_to_C_dict_short['E'], color='#59acff', linewidth=2, label=r'$\beta$-strand short domains')
plot[0,0].plot(X, enrichment_N_to_C_dict_long['E'], color='#ffec59', linewidth=2, label=r'$\beta$-strand long domains')
plot[0,0].set_xticks(xticks_ar)
plot[0,0].set_xticklabels(xticklabels_ar)
plot[0,0].set_xticks(xticks_spec, minor=True)
plot[0,0].set_xticklabels(xticklabels_spec, minor=True)
plot[0,0].tick_params(axis='x', which='minor', length=0)
plot[0,0].set_xlabel('Distance, aa')
plot[0,0].set_ylabel('Enrichment p(N)/p(C)')
plot[0,0].legend(fontsize=8.5, ncol=2, handlelength=0.7, frameon=False, columnspacing=0.7, loc='upper center')
plot[0,1].plot(X, enrichment_N_to_C_dict_short['B'], color='#94fff1', linewidth=2, label=r'$\beta$-bridge short domains')
plot[0,1].plot(X, enrichment_N_to_C_dict_long['B'], color='#ab658c', linewidth=2, label=r'$\beta$-bridge long domains')
plot[0,1].plot(X, enrichment_N_to_C_dict_short['G'], color='#59acff', linewidth=2, label=r'3-10-helix short domains')
plot[0,1].plot(X, enrichment_N_to_C_dict_long['G'], color='#ffec59', linewidth=2, label=r'3-10-helix long domains')
plot[0,1].set_xticks(xticks_ar)
plot[0,1].set_xticklabels(xticklabels_ar)
plot[0,1].set_xticks(xticks_spec, minor=True)
plot[0,1].set_xticklabels(xticklabels_spec, minor=True)
plot[0,1].tick_params(axis='x', which='minor', length=0)
plot[0,1].set_xlabel('Distance, aa')
plot[0,1].set_ylabel('Enrichment p(N)/p(C)')
plot[0,1].legend(fontsize=8.5, ncol=2, handlelength=0.7, frameon=False, columnspacing=0.7, loc='upper center')
plot[1,0].plot(X, enrichment_N_to_C_dict_short['I'], color='#94fff1', linewidth=2, label=r'$\pi$-helix short domains')
plot[1,0].plot(X, enrichment_N_to_C_dict_long['I'], color='#ab658c', linewidth=2, label=r'$\pi$-helix long domains')
plot[1,0].plot(X, enrichment_N_to_C_dict_short['T'], color='#59acff', linewidth=2, label=r'turn short domains')
plot[1,0].plot(X, enrichment_N_to_C_dict_long['T'], color='#ffec59', linewidth=2, label=r'turn long domains')
plot[1,0].set_xticks(xticks_ar)
plot[1,0].set_xticklabels(xticklabels_ar)
plot[1,0].set_xticks(xticks_spec, minor=True)
plot[1,0].set_xticklabels(xticklabels_spec, minor=True)
plot[1,0].tick_params(axis='x', which='minor', length=0)
plot[1,0].set_xlabel('Distance, aa')
plot[1,0].set_ylabel('Enrichment p(N)/p(C)')
plot[1,0].legend(fontsize=8.5, ncol=2, handlelength=0.7, frameon=False, columnspacing=0.7, loc='upper center')
plot[1,1].plot(X, enrichment_N_to_C_dict_short['S'], color='#94fff1', linewidth=2, label=r'bend short domains')
plot[1,1].plot(X, enrichment_N_to_C_dict_long['S'], color='#ab658c', linewidth=2, label=r'bend long domains')
plot[1,1].plot(X, enrichment_N_to_C_dict_short['-'], color='#59acff', linewidth=2, label=r'unstructured short domains')
plot[1,1].plot(X, enrichment_N_to_C_dict_long['-'], color='#ffec59', linewidth=2, label=r'unstructured long domains')
plot[1,1].set_xticks(xticks_ar)
plot[1,1].set_xticklabels(xticklabels_ar)
plot[1,1].set_xticks(xticks_spec, minor=True)
plot[1,1].set_xticklabels(xticklabels_spec, minor=True)
plot[1,1].tick_params(axis='x', which='minor', length=0)
plot[1,1].set_xlabel('Distance, aa')
plot[1,1].set_ylabel('Enrichment p(N)/p(C)')
plot[1,1].legend(fontsize=8.5, ncol=2, handlelength=0.7, frameon=False, columnspacing=0.7, loc='upper center')
plt.tight_layout()
plt.show()
return
#######
#Plot co-occurrence of ss elements at the termini.
#######
def plot_co_occurence(Obs_over_exp_matrices_short, Obs_over_exp_matrices_long, local_window_width):
fig, plot=plt.subplots(2,2,figsize=(7,7), dpi=100)
ticks_ar=list(range(-1, local_window_width, int(local_window_width/10)))
ticks_ar[0]=0
print(list(ticks_ar))
ticklabels_ar=np.array(list(ticks_ar))+1
plot00=plot[0,0].imshow(Obs_over_exp_matrices_short['EH'], cmap='gnuplot', vmin=0.3, vmax=2.1, interpolation='nearest')
plot[0,0].set_title(r'$\beta-\alpha$ short domains')
plo00_cbar=plot[0,0].figure.colorbar(plot00, ax=plot[0,0], shrink=0.7)
plot[0,0].set_xticks(ticks_ar)
plot[0,0].set_xticklabels(ticklabels_ar)
plot[0,0].set_yticks(ticks_ar)
plot[0,0].set_yticklabels(ticklabels_ar)
plot[0,0].set_xlabel(r'Distance from C-terminus, aa ($\alpha$)')
plot[0,0].set_ylabel(r'Distance from N-terminus, aa ($\beta$)')
plo00_cbar.ax.set_ylabel('p(Obs)/p(Exp)', rotation=-90, va="bottom")
plot01=plot[0,1].imshow(Obs_over_exp_matrices_long['EH'], cmap='gnuplot', vmin=0.3, vmax=2.1, interpolation='nearest')
plot[0,1].set_title(r'$\beta-\alpha$-helix long domains')
plo01_cbar=plot[0,1].figure.colorbar(plot01, ax=plot[0,1], shrink=0.7)
plot[0,1].set_xticks(ticks_ar)
plot[0,1].set_xticklabels(ticklabels_ar)
plot[0,1].set_yticks(ticks_ar)
plot[0,1].set_yticklabels(ticklabels_ar)
plot[0,1].set_xlabel(r'Distance from C-terminus, aa ($\alpha$)')
plot[0,1].set_ylabel(r'Distance from N-terminus, aa ($\beta$)')
plo01_cbar.ax.set_ylabel('p(Obs)/p(Exp)', rotation=-90, va="bottom")
plot10=plot[1,0].imshow(Obs_over_exp_matrices_short['HE'], cmap='gnuplot', vmin=0.3, vmax=2.1, interpolation='nearest')
plot[1,0].set_title(r'$\alpha-\beta$-strand short domains')
plo10_cbar=plot[0,0].figure.colorbar(plot10, ax=plot[1,0], shrink=0.7)
plot[1,0].set_xticks(ticks_ar)
plot[1,0].set_xticklabels(ticklabels_ar)
plot[1,0].set_yticks(ticks_ar)
plot[1,0].set_yticklabels(ticklabels_ar)
plot[1,0].set_xlabel(r'Distance from C-terminus, aa ($\beta$)')
plot[1,0].set_ylabel(r'Distance from N-terminus, aa ($\alpha$)')
plo10_cbar.ax.set_ylabel('p(Obs)/p(Exp)', rotation=-90, va="bottom")
plot11=plot[1,1].imshow(Obs_over_exp_matrices_long['HE'], cmap='gnuplot', vmin=0.3, vmax=2.1, interpolation='nearest')
plot[1,1].set_title(r'$\alpha-\beta$-strand long domains')
plo11_cbar=plot[1,1].figure.colorbar(plot11, ax=plot[1,1], shrink=0.7)
plot[1,1].set_xticks(ticks_ar)
plot[1,1].set_xticklabels(ticklabels_ar)
plot[1,1].set_yticks(ticks_ar)
plot[1,1].set_yticklabels(ticklabels_ar)
plot[1,1].set_xlabel(r'Distance from C-terminus, aa ($\beta$)')
plot[1,1].set_ylabel(r'Distance from N-terminus, aa ($\alpha$)')
plo11_cbar.ax.set_ylabel('p(Obs)/p(Exp)', rotation=-90, va="bottom")
plt.tight_layout()
plt.show()
return
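#Note on plot_co_occurence above (added for clarity): termini_dependance computes observed/expected
#matrices for all 8x8 ordered pairs of DSSP codes, but only the 'EH' (beta-strand at the N-terminus vs
#alpha-helix at the C-terminus) and 'HE' pairs are visualised; the colour scale is clipped to the
#0.3-2.1 range, so more extreme ratios saturate.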
#######
#Wrapper function.
#######
def wrapper(DSSP_inpath):
#Define domain length thresholds.
min_len=50
thr_len=130
#Define distance from termini to analyse.
window_width=50
local_window_width=50
#Read DSSP data.
DSSP_data_dict=read_dssp_data(DSSP_inpath)
#Classify domains by length.
Short_structures, Long_structures=define_length_groups(DSSP_data_dict, min_len, thr_len)
#Get phi, psi angles for N- and C-termini.
sphi_N, sphi_C, spsi_N, spsi_C, sphi, spsi=phi_psi_N_to_C(Short_structures, window_width)
lphi_N, lphi_C, lpsi_N, lpsi_C, lphi, lpsi=phi_psi_N_to_C(Long_structures, window_width)
#Compute position frequency matrices.
ss_matrix_N_short, ss_matrix_C_short, ss_pfm_N_short, ss_pfm_C_short, ss_pfm_conf_N_short, ss_pfm_conf_C_short=ss_element_frequency_matrix(Short_structures, window_width)
ss_matrix_N_long, ss_matrix_C_long, ss_pfm_N_long, ss_pfm_C_long, ss_pfm_conf_N_long, ss_pfm_conf_C_long=ss_element_frequency_matrix(Long_structures, window_width)
#Plot frequency of ss elements as a function of distance from the termini.
N_to_C_asymmetry(ss_pfm_N_short, ss_pfm_C_short, ss_pfm_conf_N_short, ss_pfm_conf_C_short, ss_pfm_N_long, ss_pfm_C_long, ss_pfm_conf_N_long, ss_pfm_conf_C_long, window_width)
#Enrichment of ss elements N- over C-terminus.
enrichment_N_to_C_dict_short=ss_ele_enrichment(ss_pfm_N_short, ss_pfm_C_short)
enrichment_N_to_C_dict_long=ss_ele_enrichment(ss_pfm_N_long, ss_pfm_C_long)
#Plot enrichment of frequency of ss elements at N-terminus over C-terminus.
N_to_C_enrichment(enrichment_N_to_C_dict_short, enrichment_N_to_C_dict_long, window_width)
#Analyse co-occurrence of secondary structure elements at protein termini.
Obs_over_exp_matrices_short=termini_dependance(ss_matrix_N_short, ss_matrix_C_short, ss_pfm_N_short, ss_pfm_C_short, local_window_width)
Obs_over_exp_matrices_long=termini_dependance(ss_matrix_N_long, ss_matrix_C_long, ss_pfm_N_long, ss_pfm_C_long, local_window_width)
#Plot co-occurrence of secondary structure elements at protein termini.
plot_co_occurence(Obs_over_exp_matrices_short, Obs_over_exp_matrices_long, local_window_width)
return
wrapper(DSSP_data_inpath)