Dataset schema (29 columns; ⌀ marks nullable):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 – 2.06M |
| ext | string | 11 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 – 251 |
| max_stars_repo_name | string | length 4 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 ⌀ | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 – 251 |
| max_issues_repo_name | string | length 4 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 ⌀ | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 – 251 |
| max_forks_repo_name | string | length 4 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 ⌀ | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 – 1.05M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.04M |
| alphanum_fraction | float64 | 0 – 1 |

Each row below is shown as a one-line header (hexsha | size | ext | lang | path | repo @ head | licenses | stars | issues | forks; the issues/forks repo is noted only where it differs from the stars repo), followed by the file content and a trailing `| avg_line_length | max_line_length | alphanum_fraction |` line.
9125319261fb94bc69a897401585fdd40320b1d2 | 25,070 | py | Python | train_multi_human.py | wenliangdai/sunets-reproduce @ d92efa80e8314aea153d498cce3c9c6e30c252bd | ["MIT"] | stars: 2 (2018-07-02T16:03:07.000Z – 2018-07-02T16:03:07.000Z) | issues: null | forks: null
import argparse
import math
import os
import pickle
import random
import sys
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from torch import nn
from torch.optim import lr_scheduler
from torch.utils import data
import torchvision.transforms as transforms
import transforms as extended_transforms
from loss import prediction_stat
from main import get_data_path
from main.loader import get_loader
from main.models import get_model
from utils import dotdict, float2str
# paths
ROOT = '/home/wenlidai/sunets-reproduce/'
RESULT = 'results'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--arch', nargs='?', type=str, default='sunet64_multi',
                        help='Architecture to use [\'sunet64, sunet128, sunet7128 etc\']')
    parser.add_argument('--model_path', help='Path to the saved model', type=str)
    parser.add_argument('--best_model_path', help='Path to the saved best model', type=str)
    parser.add_argument('--dataset', nargs='?', type=str, default='human',
                        help='Dataset to use [\'sbd, coco, cityscapes etc\']')
    parser.add_argument('--img_rows', nargs='?', type=int, default=512,
                        help='Height of the input image')
    parser.add_argument('--img_cols', nargs='?', type=int, default=512,
                        help='Width of the input image')
    parser.add_argument('--epochs', nargs='?', type=int, default=90,
                        help='# of the epochs')
    parser.add_argument('--batch_size', nargs='?', type=int, default=10,
                        help='Batch Size')
    parser.add_argument('--lr', nargs='?', type=float, default=0.0005,
                        help='Learning Rate')
    parser.add_argument('--manual_seed', default=0, type=int,
                        help='manual seed')
    parser.add_argument('--iter_size', type=int, default=1,
                        help='number of batches per weight updates')
    parser.add_argument('--log_size', type=int, default=400,
                        help='iteration period of logging segmented images')
    parser.add_argument('--dprob', nargs='?', type=float, default=1e-7,
                        help='Dropout probability')
    parser.add_argument('--momentum', nargs='?', type=float, default=0.95,
                        help='Momentum for SGD')
    parser.add_argument('--momentum_bn', nargs='?', type=float, default=0.01,
                        help='Momentum for BN')
    parser.add_argument('--weight_decay', nargs='?', type=float, default=1e-4,
                        help='Weight decay')
    parser.add_argument('--output_stride', nargs='?', type=str, default='16',
                        help='Output stride to use [\'32, 16, 8 etc\']')
    parser.add_argument('--freeze', action='store_true',
                        help='Freeze BN params')
    parser.add_argument('--restore', action='store_true',
                        help='Restore Optimizer params')
    parser.add_argument('--epoch_log_size', nargs='?', type=str, default=20,
                        help='Every [epoch_log_size] iterations to print loss in each epoch')
    parser.add_argument('--pretrained', action='store_true',
                        help='Use pretrained ImageNet initialization or not')
    parser.add_argument('--n_classes', nargs='?', type=int, action='append',
                        help='number of classes of the labels')
    parser.add_argument('--optim', nargs='?', type=str, default='SGD',
                        help='Optimizer to use [\'SGD, Nesterov etc\']')

    global args
    args = parser.parse_args()

    RESULT = '{}_{}_{}'.format(RESULT, args.arch, args.dataset)
    if args.pretrained:
        RESULT = RESULT + '_pretrained'

    main(args)
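The row's content is truncated by the viewer: `main(args)` is called but its definition is cut off. A minimal sketch of the kind of entry point the argument list implies; every detail below (helper signatures, the training-loop body) is an assumption, not the repository's code:

```python
def main(args):
    # Hypothetical reconstruction -- the real main() is truncated out of this row.
    torch.manual_seed(args.manual_seed)
    cudnn.benchmark = True
    model = get_model(args.arch, args.n_classes)  # signature assumed
    model = model.to(device)
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    for epoch in range(args.epochs):
        # iterate the loader from get_loader(args.dataset), compute the loss
        # via loss.prediction_stat, and step the optimizer here
        pass
```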
| avg_line_length 55.835189 | max_line_length 210 | alphanum_fraction 0.657359 |

9125a2258a5cbeeafce52644773c51a924d107ac | 392 | py | Python | exemplos/exemplo-aula-14-01.py | quitaiskiluisf/TI4F-2021-LogicaProgramacao @ d12e5c389a43c98f27726df5618fe529183329a8 | ["Unlicense"] | stars: null | issues: null | forks: null
# Presentation
print('Program to sum 8 values using vectors/lists')
print()

# Declare the vector
valores = [0, 0, 0, 0, 0, 0, 0, 0]

# Read the values
for i in range(len(valores)):
    valores[i] = int(input('Enter a value: '))

# Compute the sum
soma = 0
for i in range(len(valores)):
    soma += valores[i]

# Show the result
print(f'The sum of the values is {soma}')
| avg_line_length 20.631579 | max_line_length 64 | alphanum_fraction 0.67602 |

9125c9f61c337477b68228ea1ba426e48ce06b1a | 333 | py | Python | day3/p1.py | pwicks86/adventofcode2015 @ fba7cc8f6942f43f5b0226a0ac70365630f14cbd | ["MIT"] | stars: null | issues: null | forks: null
from collections import defaultdict
f = open("input.txt")
d = f.read()
houses = defaultdict(int,{(0,0):1})
cur = [0,0]
for c in d:
    if c == "<":
        cur[0] -= 1
    if c == ">":
        cur[0] += 1
    if c == "v":
        cur[1] += 1
    if c == "^":
        cur[1] -= 1
    houses[tuple(cur)] += 1
print(len(houses.keys()))
| avg_line_length 18.5 | max_line_length 35 | alphanum_fraction 0.456456 |

91266dc2fa03da47339e3882e71342b1ee45462b | 2,326 | py | Python | pbr/config/blend_config.py | NUbots/NUpbr @ 49b0d2abd15512a93bfe21157269288c9ec4c54d | ["MIT"] | stars: 1 (2019-03-25T04:37:06.000Z – 2019-03-25T04:37:06.000Z) | issues: 3 (2020-07-24T11:55:48.000Z – 2022-02-20T20:49:17.000Z) | forks: null
# Blender-specific Configuration Settings
from math import pi
render = {
"render_engine": "CYCLES",
"render": {"cycles_device": "GPU"},
"dimensions": {"resolution": [1280, 1024], "percentage": 100.0},
"sampling": {"cycles_samples": 256, "cycles_preview_samples": 16},
"light_paths": {
"transparency": {"max_bounces": 1, "min_bounces": 1},
"bounces": {"max_bounces": 1, "min_bounces": 1},
"diffuse": 1,
"glossy": 1,
"transmission": 1,
"volume": 0,
"reflective_caustics": False,
"refractive_caustics": False,
},
"performance": {
"render_tile": [512, 512],
"threads": {"mode": "FIXED", "num_threads": 8},
},
"layers": {"use_hair": False},
}
scene = {"units": {"length_units": "METRIC", "rotation_units": "DEGREES"}}
layers = {"denoising": {"use_denoising": False}}
field = {
"material": {
"mapping": {
"translation": (0.0, 0.05, 0.0),
"rotation": (0.0, -pi / 2.0, 0.0),
"scale": (1.0, 0.6, 1.0),
},
"mix_lower_grass": {
"inp1": (0.000, 0.012, 0.00076, 1.0),
"inp2": (0.020, 0.011, 0.0, 1.0),
},
"mix_upper_grass": {
"inp1": (0.247, 0.549, 0.0, 1),
"inp2": (0.257, 0.272, 0.0, 1),
},
"noise": {"inp": [5.0, 2.0, 0.0]},
"hsv": {"inp": [0.0, 0.0, 1.9, 1.0]},
"mix_up_grass_hsv": {"inp0": 0.455},
"mix_low_grass_field_lines": {"inp0": 0.4},
"mix_grass": {"inp0": 0.391},
"principled": {"specular": 0.225, "roughness": 0.625},
},
"lower_plane": {
"colour": (0.003, 0.04, 0.0, 1.0),
"principled": {"specular": 0.225, "roughness": 1.0},
"mapping": {"scale": (0.1, 0.1, 1.0)},
},
}
ball = {
"initial_cond": {"segments": 16, "ring_count": 10, "calc_uvs": True},
"material": {"metallic": 0.0, "roughness": 0.35},
"subsurf_mod": {"levels": 1, "rend_levels": 4},
}
goal = {
"initial_cond": {"vertices": 32, "calc_uvs": True},
"corner_curve": {"fill": "FULL"},
"material": {"metallic": 0.0, "roughness": 0.35, "colour": (0.8, 0.8, 0.8, 1.0)},
"subsurf_mod": {"levels": 1, "rend_levels": 4},
}
robot = {"material": {"specular": 0.742, "metallic": 0.0, "roughness": 0.9}}
| avg_line_length 31.432432 | max_line_length 85 | alphanum_fraction 0.503439 |

912692288f987cd8f54127db16d2b577edc80fc1 | 7,022 | py | Python | simglucose/controller/basal_bolus_ctrller.py | mia-jingyi/simglucose @ a90bd8750fce362be91668ed839b3b252bc0d58d | ["MIT"] | stars: null | issues: null | forks: null
from .base import Controller
from .base import Action
import numpy as np
import pandas as pd
import logging
from collections import namedtuple
from tqdm import tqdm
logger = logging.getLogger(__name__)
CONTROL_QUEST = 'simglucose/params/Quest.csv'
PATIENT_PARA_FILE = 'simglucose/params/vpatient_params.csv'
ParamTup = namedtuple('ParamTup', ['basal', 'cf', 'cr'])
def bb_test(bbc, env, n_days, seed, full_save=False):
    env.seeds['sensor'] = seed
    env.seeds['scenario'] = seed
    env.seeds['patient'] = seed
    env.reset()
    full_patient_state = []
    carb_error_mean = 0
    carb_error_std = 0.2
    carb_miss_prob = 0.05
    action = bbc.manual_bb_policy(carbs=0, glucose=140)
    for _ in tqdm(range(n_days * 288)):
        obs, reward, done, info = env.step(action=action.basal + action.bolus)
        bg = env.env.CGM_hist[-1]
        carbs = info['meal']
        if np.random.uniform() < carb_miss_prob:
            carbs = 0
        err = np.random.normal(carb_error_mean, carb_error_std)
        carbs = carbs + carbs * err
        action = bbc.manual_bb_policy(carbs=carbs, glucose=bg)
        full_patient_state.append(info['patient_state'])
    full_patient_state = np.stack(full_patient_state)
    if full_save:
        return env.env.show_history(), full_patient_state
    else:
        return {'hist': env.env.show_history()[288:]}
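`bb_test` drives a basal-bolus controller whose class is truncated out of this row. A stand-in showing only the interface the harness relies on; the dosing numbers are placeholders, not simglucose's logic:

```python
class SketchBBController(Controller):
    # Interface inferred from the call sites above, not copied from simglucose.
    def manual_bb_policy(self, carbs, glucose):
        basal = 0.01                              # placeholder basal rate
        bolus = carbs / 15.0 if carbs > 0 else 0  # placeholder carb ratio
        return Action(basal=basal, bolus=bolus)
```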
| avg_line_length 38.582418 | max_line_length 99 | alphanum_fraction 0.602108 |

912788fe05c2b0029d03454b315f2758ce890c5a | 6,025 | py | Python | ceilometer/event/trait_plugins.py | redhat-openstack/ceilometer @ 9e503d7068889e52e9144079de331ed51676e535 | ["Apache-2.0"] | stars: 1 (2016-03-10T06:55:45.000Z – 2016-03-10T06:55:45.000Z) | issues: null | forks: null
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
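The module body is truncated after the imports. Files of this shape in ceilometer define an abstract plugin base on top of `abc` and `six`; a sketch of that pattern (not the verbatim upstream class):

```python
@six.add_metaclass(abc.ABCMeta)
class TraitPluginBase(object):
    """Base class for plugins that turn notification fields into event traits."""

    def __init__(self, **kw):
        super(TraitPluginBase, self).__init__()

    @abc.abstractmethod
    def trait_value(self, match_list):
        """Return the value for the trait, or None to drop it."""
```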
| avg_line_length 37.42236 | max_line_length 77 | alphanum_fraction 0.550539 |

9127b9612983c8643c1eb5911a7a12880ad76607 | 803 | py | Python | web13/jsonapi.py | gongjunhuang/web @ 9412f6fd7c223174fdb30f4d7a8b61a8e130e329 | ["Apache-2.0"] | stars: null | issues: null | forks: null
from flask import Flask, redirect, url_for, jsonify, request
app = Flask(__name__)
users = []
'''
JSON API

jsonify turns form/Python data into a JSON response; this is the basis of
open APIs.

RESTful API (Dr. Fielding): the URL names the resource and the HTTP verb
names the action:

GET    /players          list all players
GET    /player/id        fetch the player with this id
PUT    /players          create
PATCH  /players          update
DELETE /player/id        delete
GET    /player/id/level  nested resource
'''
app.run()
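No route survives in this row, although `jsonify` and `request` are imported and the docstring lists REST endpoints. One endpoint in that style, purely illustrative (in the file it would have to be defined before `app.run()`):

```python
@app.route('/players', methods=['GET'])
def list_players():
    # Hypothetical handler returning the module-level list as JSON.
    return jsonify(users)
```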
| avg_line_length 16.387755 | max_line_length 60 | alphanum_fraction 0.636364 |

91291f525e80342e370d1b3f81196a52f9108a77 | 786 | py | Python | cards/migrations/0012_auto_20180331_1348.py | mhndlsz/memodrop @ 7ba39143c8e4fbe67881b141accedef535e936e6 | ["MIT"] | stars: 18 (2018-04-15T14:01:25.000Z – 2022-03-16T14:57:28.000Z) | issues: null | forks: 4 (2018-04-15T14:16:12.000Z – 2020-08-10T14:31:48.000Z)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-31 13:48
from __future__ import unicode_literals
from django.db import migrations, models
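The migration's class body is truncated out of this row. An auto-generated Django 1.11 migration has the following shape; the dependency and operation below are placeholders, not the real contents:

```python
class Migration(migrations.Migration):

    dependencies = [
        ('cards', '0011_previous_migration'),  # placeholder dependency
    ]

    operations = [
        migrations.AlterField(
            model_name='card',        # placeholder model
            name='question',          # placeholder field
            field=models.TextField(),
        ),
    ]
```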
| avg_line_length 25.354839 | max_line_length 68 | alphanum_fraction 0.577608 |

912a168bff4536c4b4657348252f51f09a3dbc8c | 1,776 | py | Python | MoMMI/Modules/ss14_nudges.py | T6751/MoMMI @ 4b9dd0d49c6e2bd82b82a4893fc35475d4e39e9a | ["MIT"] | stars: 18 (2016-08-06T17:31:59.000Z – 2021-12-24T13:08:02.000Z) | issues: 29 (2016-08-07T14:03:00.000Z – 2022-01-23T21:05:33.000Z) | forks: 25 (2016-08-08T12:56:02.000Z – 2022-02-09T07:17:51.000Z)
import logging
from typing import Match, Any, Dict
import aiohttp
from discord import Message
from MoMMI import comm_event, command, MChannel, always_command
logger = logging.getLogger(__name__)
| avg_line_length 27.75 | max_line_length 126 | alphanum_fraction 0.649212 |

912b4d4f0a7c8620ff9eef12211953cd7f872472 | 1,633 | py | Python | oxe-api/test/resource/company/test_get_company_taxonomy.py | CybersecurityLuxembourg/openxeco @ 8d4e5578bde6a07f5d6d569b16b4de224abf7bf0 | ["BSD-2-Clause"] | stars: null | issues: null | forks: null
from test.BaseCase import BaseCase
| avg_line_length 49.484848 | max_line_length 102 | alphanum_fraction 0.62278 |

912c1f2c9b394208b14b4781f1f67d739e19f340 | 539 | py | Python | spoon/models/groupmembership.py | mikeboers/Spoon @ 9fe4a06be7c2c6c307b79e72893e32f2006de4ea | ["BSD-3-Clause"] | stars: 4 (2017-11-05T02:54:39.000Z – 2022-03-01T06:01:20.000Z) | issues: null | forks: null
import sqlalchemy as sa
from ..core import db
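The model definition is truncated after the imports. A minimal SQLAlchemy association model of the kind the filename suggests; table and column names are assumptions:

```python
class GroupMembership(db.Model):
    __tablename__ = 'group_memberships'

    id = sa.Column(sa.Integer, primary_key=True)
    user_id = sa.Column(sa.Integer, sa.ForeignKey('users.id'), nullable=False)
    group_id = sa.Column(sa.Integer, sa.ForeignKey('groups.id'), nullable=False)
```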
| avg_line_length 22.458333 | max_line_length 68 | alphanum_fraction 0.651206 |

912c4617e4d0718d34c2b278ca0d1aef755136f4 | 59,222 | py | Python | nonlinear/aorta/nonlinearCasesCreation_aorta.py | HaolinCMU/Soft_tissue_tracking @ 8592b87066ddec84a3aefc18240303cb085cf34c | ["MIT"] | stars: 3 (2020-08-25T05:10:34.000Z – 2020-09-18T01:50:33.000Z) | issues: null | forks: null
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 25 13:08:16 2020
@author: haolinl
"""
import copy
import os
import time
import numpy as np
import random
import scipy.io # For extracting data from .mat file
def saveLog(file_name_list, elapsed_time_list, write_status, data_file_name,
            sample_num, fix_indices_list, loads_num, load_sampling_type, load_param_tuple,
            material_type, modulus, poisson_ratio, isCoupleOn, isLaplacianSmoothingOn,
            coupling_type="", coupling_neighbor_layer_num=1,
            laplacian_iter_num=5, laplacian_smoothing_rate=1e-4, write_path="nonlinear_case_generation.log"):
    """
    Save the nonlinear cases generation results into .log file.

    Parameters:
    ----------
    file_name_list: List of strings.
        Names of generated files.
    elapsed_time_list: List of floats.
        Elapsed time of generation for each input file.
        In exact order.
    write_status: String.
        Indicating the type of input file generation.
        "Normal" / "Fast":
        "Normal": generate all definitions;
        "Fast": generate nodes and elements definition only.
    data_file_name: String.
        The name of modeling data file.
        Format: .mat
    sample_num: Int.
        Number of generated input files.
    fix_indices_list: List of ints.
        Indices of fixed points.
        Indexed from 1.
    loads_num: Int.
        The number of concentrated forces.
    load_sampling_type: String.
        The distribution type for force sampling.
        "uniform" / "gaussian":
        "uniform": uniform distribution with specified (min, max) range.
        "gaussian": gaussian distribution with specified (mean, dev) parameters.
    load_param_tuple: tuple of floats.
        Parameters of load sampling.
        load_sampling_type specific.
    material_type: String.
        The type of material.
        "linear" / "neo_hookean_solid" / "neo_hookean_fitting":
        "linear": linear elastic material.
        "neo_hookean_solid": neo-Hookean solid following the strain energy formulation.
        "neo_hookean_fitting": neo-Hookean solid following the stress-strain curve fitted from user-input stress-strain data.
    modulus: Float.
        Elastic modulus of the material.
    poisson_ratio: Float.
        Poisson's ratio of the material.
    isCoupleOn: Boolean indicator.
        True: using coupling constraint for local force distribution.
        False: not using coupling constraint.
    isLaplacianSmoothingOn: Boolean indicator.
        True: using Laplacian-Beltrami operator matrix to smooth the force distribution.
        False: not using Laplacian smoothing.
    coupling_type (optional): String.
        The type of coupling constraint.
        Default: "".
    coupling_neighbor_layer_num (optional): Int.
        The number of neighbor layers to which the local force distributing goes.
        Default: 1.
    laplacian_iter_num (optional): Int.
        The number of iterations for Laplacian smoothing.
        Default: 5.
    laplacian_smoothing_rate (optional): Float.
        The rate of Laplacian smoothing.
        Default: 1e-4.
    write_path (optional): String.
        The path of the to-be-written file.
        Default: "nonlinear_case_generation.log".
    """

    if isCoupleOn: isCoupleOn_status = "On"
    else: isCoupleOn_status = "Off"

    if isLaplacianSmoothingOn: isLaplacianSmoothingOn_status = "On"
    else: isLaplacianSmoothingOn_status = "Off"

    content = ["Data_file_name: {}".format(data_file_name),
               "Sample_num = {}".format(sample_num),
               "Fixed_indices_list (indexed from 1): {}".format(fix_indices_list),
               "Material type: {}".format(material_type),
               "Elastic modulus = {} Pa".format(modulus),
               "Poisson's ratio = {}".format(poisson_ratio),
               "Loads_num = {}".format(loads_num)]

    if load_sampling_type == "uniform":
        content += ["Load sampling type: {}".format(load_sampling_type),
                    "Load sampling range (min, max): {} N".format(load_param_tuple)]
    elif load_sampling_type == "gaussian":
        content += ["Load sampling type: {}".format(load_sampling_type),
                    "Load sampling parameters (mean, dev): {} N".format(load_param_tuple)]
    else:
        load_sampling_type = "uniform"
        content += ["Load sampling type: {}".format(load_sampling_type),
                    "Load sampling range (min, max): {} N".format(load_param_tuple)]

    content += ["Coupling constraint status: {}".format(isCoupleOn_status),
                "Laplacian smoothing status: {}".format(isLaplacianSmoothingOn_status)]

    if isCoupleOn:
        content += ["Coupling type: {}".format(coupling_type),
                    "Coupling neighbor layer numbers: {}".format(coupling_neighbor_layer_num)]
    if isLaplacianSmoothingOn:
        content += ["Laplacian smoothing iteration numbers = {}".format(laplacian_iter_num),
                    "Laplacian smoothing rate = {}".format(laplacian_smoothing_rate)]

    content += ["----------------------------------------------------------",
                "Input file\t\tExport status\tGeneration status\tElapsed time/s"]

    elapsed_time_total = 0
    for i, file_name in enumerate(file_name_list):
        data_string_temp = "{}\t\t{}\t\tCompleted\t".format(file_name, write_status) + "\t%.8f" % (elapsed_time_list[i])
        content.append(data_string_temp)
        elapsed_time_total += elapsed_time_list[i]

    content += ["----------------------------------------------------------",
                "Total elapsed time: {} s".format(elapsed_time_total)]
    content = '\n'.join(content)

    with open(write_path, 'w') as f: f.write(content)


if __name__ == "__main__":
    main()
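`main()` is truncated out of this row. A stub showing how `saveLog()` would be driven; every value below is a placeholder:

```python
def main():
    # Placeholder invocation -- the real main() builds the nonlinear cases
    # first and collects these values while doing so.
    saveLog(file_name_list=["case_1.inp"], elapsed_time_list=[1.23],
            write_status="Normal", data_file_name="model.mat", sample_num=1,
            fix_indices_list=[1, 2, 3], loads_num=3,
            load_sampling_type="uniform", load_param_tuple=(-1.0, 1.0),
            material_type="neo_hookean_solid", modulus=1e6, poisson_ratio=0.48,
            isCoupleOn=False, isLaplacianSmoothingOn=True)
```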
| avg_line_length 46.594807 | max_line_length 238 | alphanum_fraction 0.590473 |

912dd1c1fee777c8a3a588b4ebb22c1cb4588df4 | 1,790 | py | Python | data/cache/test/test_cache.py | dongboyan77/quay @ 8018e5bd80f17e6d855b58b7d5f2792d92675905 | ["Apache-2.0"] | stars: 1 (2020-10-16T19:30:41.000Z – 2020-10-16T19:30:41.000Z) | issues: 15 (2020-06-18T15:32:06.000Z – 2022-03-03T23:06:24.000Z) | forks: null
import pytest
from mock import patch
from data.cache import InMemoryDataModelCache, NoopDataModelCache, MemcachedModelCache
from data.cache.cache_key import CacheKey
def test_memcache():
    key = CacheKey("foo", "60m")
    with patch("data.cache.impl.Client", MockClient):
        cache = MemcachedModelCache(("127.0.0.1", "-1"))
        assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}
        assert cache.retrieve(key, lambda: {"a": 1234}) == {"a": 1234}


def test_memcache_should_cache():
    key = CacheKey("foo", None)

    with patch("data.cache.impl.Client", MockClient):
        cache = MemcachedModelCache(("127.0.0.1", "-1"))
        assert cache.retrieve(key, lambda: {"a": 1234}, should_cache=sc) == {"a": 1234}

        # Ensure not cached since it was `1234`.
        assert cache._get_client().get(key.key) is None

        # Ensure cached.
        assert cache.retrieve(key, lambda: {"a": 2345}, should_cache=sc) == {"a": 2345}
        assert cache._get_client().get(key.key) is not None
        assert cache.retrieve(key, lambda: {"a": 2345}, should_cache=sc) == {"a": 2345}
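Both tests patch in a `MockClient` and pass a `should_cache` predicate `sc`, neither of which survives in this row. Minimal stand-ins consistent with the assertions above (assumptions, not quay's actual fixtures):

```python
class MockClient(object):
    def __init__(self, server, **kwargs):
        self.data = {}

    def get(self, key, default=None):
        return self.data.get(key, default)

    def set(self, key, value, expire=None):
        self.data[key] = value


def sc(value):
    # Cache everything except the first payload, matching the test's flow.
    return value["a"] != 1234
```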
| avg_line_length 32.545455 | max_line_length 89 | alphanum_fraction 0.634078 |

912ed2b516655605fdb89fa39bcc4f1ec0c3ed2a | 2,306 | py | Python | Packs/HealthCheck/Scripts/HealthCheckIncidentsCreatedMonthly/HealthCheckIncidentsCreatedMonthly.py | mazmat-panw/content @ 024a65c1dea2548e2637a9cbbe54966e9e34a722 | ["MIT"] | stars: 2 (2021-12-06T21:38:24.000Z – 2022-01-13T08:23:36.000Z) | issues: 87 (2022-02-23T12:10:53.000Z – 2022-03-31T11:29:05.000Z) | forks: 2 (repo henry-sue-pa/content @ 043c6badfb4f9c80673cad9242fdea72efe301f7; 2022-01-05T15:27:01.000Z – 2022-02-01T19:27:43.000Z)
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
ctx = demisto.context()
dataFromCtx = ctx.get("widgets")
if not dataFromCtx:
    incident = demisto.incidents()[0]
    accountName = incident.get('account')
    accountName = f"acc_{accountName}" if accountName != "" else ""
    stats = demisto.executeCommand(
        "demisto-api-post",
        {
            "uri": f"{accountName}/statistics/widgets/query",
            "body": {
                "size": 13,
                "dataType": "incidents",
                "query": "",
                "dateRange": {
                    "period": {
                        "byFrom": "months",
                        "fromValue": 12
                    }
                },
                "widgetType": "line",
                "params": {
                    "groupBy": [
                        "occurred(m)",
                        "null"
                    ],
                    "timeFrame": "months"
                },
            },
        })
    res = stats[0]["Contents"]["response"]
    buildNumber = demisto.executeCommand("DemistoVersion", {})[0]['Contents']['DemistoVersion']['buildNumber']
    buildNumber = f'{buildNumber}' if buildNumber != "REPLACE_THIS_WITH_CI_BUILD_NUM" else "618658"
    if int(buildNumber) >= 618657:
        # Line graph:
        data = {
            "Type": 17,
            "ContentsFormat": "line",
            "Contents": {
                "stats": res,
                "params": {
                    "timeFrame": "months"
                }
            }
        }
    else:
        # Bar graph:
        output = []
        for entry in res:
            output.append({"name": entry["name"], "data": entry["data"]})
        data = {
            "Type": 17,
            "ContentsFormat": "bar",
            "Contents": {
                "stats": output,
                "params": {
                    "layout": "horizontal"
                }
            }
        }
    demisto.results(data)
else:
    data = {
        "Type": 17,
        "ContentsFormat": "line",
        "Contents": {
            "stats": dataFromCtx['IncidentsCreatedMonthly'],
            "params": {
                "timeFrame": "months"
            }
        }
    }
    demisto.results(data)
| avg_line_length 27.452381 | max_line_length 110 | alphanum_fraction 0.423677 |

912f4cb2d5b6031823d833fa3533c0b3fca9c0fd | 13,099 | py | Python | Bert_training.py | qzlydao/Bert_Sentiment_Analysis @ 2da2d0c6da2cdb55f37ff0a7e95f0ea4876b2d61 | ["Apache-2.0"] | stars: null | issues: null | forks: null
from torch.utils.data import DataLoader
from dataset.wiki_dataset import BERTDataset
from models.bert_model import *
from tqdm import tqdm
import numpy as np
import pandas as pd
import os
config = {}
config['train_corpus_path'] = './corpus/train_wiki.txt'
config['test_corpus_path'] = './corpus/test_wiki.txt'
config['word2idx_path'] = './corpus/bert_word2idx_extend.json'
config['output_path'] = './output_wiki_bert'
config['batch_size'] = 1
config['max_seq_len'] = 200
config['vocab_size'] = 32162
config['lr'] = 2e-6
config['num_workers'] = 0
if __name__ == '__main__':
    start_epoch = 3
    train_epoches = 1
    trainer = init_trainer(config['lr'], load_model=True)
    all_loss = []
    threshold = 0
    patient = 10
    best_f1 = 0
    dynamic_lr = config['lr']
    # todo start_epoch 3
    for epoch in range(start_epoch, start_epoch + train_epoches):
        print('train with learning rate {}'.format(str(dynamic_lr)))
        trainer.train(epoch)
        trainer.save_state_dict(trainer.bert_model, epoch, dir_path=config['output_path'],
                                file_path='bert.model')
        trainer.test(epoch)
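`init_trainer` is truncated out of this row; the `__main__` block only requires the interface below. A placeholder skeleton (the real factory presumably wires up `BERTDataset`, a `DataLoader` and the BERT model from the imports above):

```python
class _TrainerSketch:
    # Placeholder exposing exactly the attributes/methods used in __main__.
    def __init__(self, lr):
        self.bert_model = None  # would hold the BERT nn.Module

    def train(self, epoch): pass
    def test(self, epoch): pass
    def save_state_dict(self, model, epoch, dir_path='.', file_path='bert.model'): pass


def init_trainer(lr, load_model=False):
    return _TrainerSketch(lr)
```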
| avg_line_length 41.062696 | max_line_length 130 | alphanum_fraction 0.579128 |

913073679e4abf540c0706db4723633ae6619d7d | 5,757 | py | Python | python/triton/language/random.py | appliedml85/triton @ 8bedcce9befbbe95d8fe0a082718edc4050e2831 | ["MIT"] | stars: 1 (2021-09-03T15:58:49.000Z – 2021-09-03T15:58:49.000Z) | issues: null | forks: null
import triton
import triton.language as tl
# Notes
# 1. triton doesn't support uint32, so we use int32 instead and benefit from the fact that two's complement operations are equivalent to uint operations.
# 2. multiply_low_high is currently inefficient.
# 3. Even though technically philox sampling outputs int, in many places we pretend they were actually uints e.g. uint_to_uniform_float
| avg_line_length 27.545455 | max_line_length 153 | alphanum_fraction 0.639396 |

9130c59838ca9f05494c451b3ac65479a741bec6 | 265 | py | Python | pyctcdecode/__init__.py | kensho-technologies/pyctcdecode @ c33f94bce283ea9af79d30e2b815e3bf34a137c9 | ["Apache-2.0"] | stars: 203 (2021-06-08T22:49:56.000Z – 2022-03-31T11:55:21.000Z) | issues: 40 (2021-06-11T20:58:07.000Z – 2022-03-23T10:58:27.000Z) | forks: 39 (2021-06-09T21:03:35.000Z – 2022-03-26T13:14:23.000Z)
# Copyright 2021-present Kensho Technologies, LLC.
from .alphabet import Alphabet # noqa
from .decoder import BeamSearchDecoderCTC, build_ctcdecoder # noqa
from .language_model import LanguageModel # noqa
__package_name__ = "pyctcdecode"
__version__ = "0.3.0"
| avg_line_length 29.444444 | max_line_length 67 | alphanum_fraction 0.792453 |

9130dea9e896e245175b22a313c12b30eff43fdf | 137 | py | Python | wumpus/start_server.py | marky1991/Legend-of-Wumpus @ b53f4a520cea274ddb4c40c6ab4f42a68008896f | ["MIT"] | stars: null | issues: null | forks: null
from wumpus.server import Server
from circuits import Debugger
s = Server("0.0.0.0", 50551) + Debugger()
s.run()
import sys
sys.exit(1)
| avg_line_length 17.125 | max_line_length 41 | alphanum_fraction 0.722628 |

9130e2a60db5f7dd70d5dc6252d49d770a1edb17 | 6,567 | py | Python | platypush/backend/joystick/linux/__init__.py | BlackLight/platypush @ 6c0a8bf2599eb4ab41a6122dbd988075d8b1a63a | ["MIT"] | stars: 228 (2018-01-30T11:17:09.000Z – 2022-03-24T11:22:26.000Z) | issues: 167 (2017-12-11T19:35:38.000Z – 2022-03-27T14:45:30.000Z) | forks: 16 (repo BlackLight/runbullet @ 8d26c8634d2677b4402f0a21b9ab8244b44640db; 2018-05-03T07:31:56.000Z – 2021-12-05T19:27:37.000Z)
import array
import struct
import time
from fcntl import ioctl
from typing import IO
from platypush.backend import Backend
from platypush.message.event.joystick import JoystickConnectedEvent, JoystickDisconnectedEvent, \
JoystickButtonPressedEvent, JoystickButtonReleasedEvent, JoystickAxisEvent
| avg_line_length 34.563158 | max_line_length 115 | alphanum_fraction 0.536166 |

91319c7d1f44146497a5047e81aae4b710f7a353 | 10,043 | py | Python | src/modules/sensors/vehicle_magnetometer/mag_compensation/python/mag_compensation.py | SaxionMechatronics/Firmware @ 7393d5d7610dc8d2cb64d90a5359b6c561fb642a | ["BSD-3-Clause"] | stars: 4,224 (2015-01-02T11:51:02.000Z – 2020-10-27T23:42:28.000Z) | issues: 11,736 (2015-01-01T11:59:16.000Z – 2020-10-28T17:13:38.000Z) | forks: 11,850 (2015-01-02T14:54:47.000Z – 2020-10-28T16:42:47.000Z)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
File: mag_compensation.py
Author: Tanja Baumann
Email: [email protected]
Github: https://github.com/baumanta
Description:
Computes linear coefficients for mag compensation from thrust and current
Usage:
python mag_compensation.py /path/to/log/logfile.ulg current --instance 1
Remark:
If your logfile does not contain some of the topics, e.g.battery_status/current_a
you will have to comment out the corresponding parts in the script
"""
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from pyulog import ULog
from pyulog.px4 import PX4ULog
from pylab import *
import numpy as np
import textwrap as tw
import argparse
#arguments
parser = argparse.ArgumentParser(description='Calculate compensation parameters from ulog')
parser.add_argument('logfile', type=str, nargs='?', default=[],
help='full path to ulog file')
parser.add_argument('type', type=str, nargs='?', choices=['current', 'thrust'], default=[],
help='Power signal used for compensation, supported is "current" or "thrust".')
parser.add_argument('--instance', type=int, nargs='?', default=0,
help='instance of the current or thrust signal to use (0 or 1)')
args = parser.parse_args()
log_name = args.logfile
comp_type = args.type
comp_instance = args.instance
#Load the log data (produced by pyulog)
log = ULog(log_name)
pxlog = PX4ULog(log)
# Select msgs and copy into arrays
armed = get_data('vehicle_status', 'arming_state', 0)
t_armed = ms2s_list(get_data('vehicle_status', 'timestamp', 0))
if comp_type == "thrust":
power = get_data('vehicle_rates_setpoint', 'thrust_body[2]', comp_instance)
power_t = ms2s_list(get_data('vehicle_rates_setpoint', 'timestamp', comp_instance))
comp_type_param = 1
factor = 1
unit = "[G]"
elif comp_type == "current":
power = get_data('battery_status', 'current_a', comp_instance)
power = np.true_divide(power, 1000) #kA
power_t = ms2s_list(get_data('battery_status', 'timestamp', comp_instance))
comp_type_param = 2 + comp_instance
factor = -1
unit = "[G/kA]"
else:
print("unknown compensation type {}. Supported is either 'thrust' or 'current'.".format(comp_type))
sys.exit(1)
if len(power) == 0:
print("could not retrieve power signal from log, zero data points")
sys.exit(1)
mag0X_body = get_data('sensor_mag', 'x', 0)
mag0Y_body = get_data('sensor_mag', 'y', 0)
mag0Z_body = get_data('sensor_mag', 'z', 0)
t_mag0 = ms2s_list(get_data('sensor_mag', 'timestamp', 0))
mag0_ID = get_data('sensor_mag', 'device_id', 0)
mag1X_body = get_data('sensor_mag', 'x', 1)
mag1Y_body = get_data('sensor_mag', 'y', 1)
mag1Z_body = get_data('sensor_mag', 'z', 1)
t_mag1 = ms2s_list(get_data('sensor_mag', 'timestamp', 1))
mag1_ID = get_data('sensor_mag', 'device_id', 1)
mag2X_body = get_data('sensor_mag', 'x', 2)
mag2Y_body = get_data('sensor_mag', 'y', 2)
mag2Z_body = get_data('sensor_mag', 'z', 2)
t_mag2 = ms2s_list(get_data('sensor_mag', 'timestamp', 2))
mag2_ID = get_data('sensor_mag', 'device_id', 2)
mag3X_body = get_data('sensor_mag', 'x', 3)
mag3Y_body = get_data('sensor_mag', 'y', 3)
mag3Z_body = get_data('sensor_mag', 'z', 3)
t_mag3 = ms2s_list(get_data('sensor_mag', 'timestamp', 3))
mag3_ID = get_data('sensor_mag', 'device_id', 3)
magX_body = []
magY_body = []
magZ_body = []
mag_id = []
t_mag = []
if len(mag0X_body) > 0:
    magX_body.append(mag0X_body)
    magY_body.append(mag0Y_body)
    magZ_body.append(mag0Z_body)
    t_mag.append(t_mag0)
    mag_id.append(mag0_ID[0])
if len(mag1X_body) > 0:
    magX_body.append(mag1X_body)
    magY_body.append(mag1Y_body)
    magZ_body.append(mag1Z_body)
    t_mag.append(t_mag1)
    mag_id.append(mag1_ID[0])
if len(mag2X_body) > 0:
    magX_body.append(mag2X_body)
    magY_body.append(mag2Y_body)
    magZ_body.append(mag2Z_body)
    t_mag.append(t_mag2)
    mag_id.append(mag2_ID[0])
if len(mag3X_body) > 0:
    magX_body.append(mag3X_body)
    magY_body.append(mag3Y_body)
    magZ_body.append(mag3Z_body)
    t_mag.append(t_mag3)
    mag_id.append(mag3_ID[0])
n_mag = len(magX_body)
#log index does not necessarily match mag calibration instance number
calibration_instance = []
instance_found = False
for idx in range(n_mag):
    instance_found = False
    for j in range(4):
        if mag_id[idx] == log.initial_parameters["CAL_MAG{}_ID".format(j)]:
            calibration_instance.append(j)
            instance_found = True
    if not instance_found:
        print('Mag {} calibration instance not found, run compass calibration first.'.format(mag_id[idx]))
#get first arming sequence from data
start_time = 0
stop_time = 0
for i in range(len(armed)-1):
    if armed[i] == 1 and armed[i+1] == 2:
        start_time = t_armed[i+1]
    if armed[i] == 2 and armed[i+1] == 1:
        stop_time = t_armed[i+1]
        break
#cut unarmed sequences from mag data
index_start = 0
index_stop = 0
for idx in range(n_mag):
    for i in range(len(t_mag[idx])):
        if t_mag[idx][i] > start_time:
            index_start = i
            break
    for i in range(len(t_mag[idx])):
        if t_mag[idx][i] > stop_time:
            index_stop = i - 1
            break
    t_mag[idx] = t_mag[idx][index_start:index_stop]
    magX_body[idx] = magX_body[idx][index_start:index_stop]
    magY_body[idx] = magY_body[idx][index_start:index_stop]
    magZ_body[idx] = magZ_body[idx][index_start:index_stop]
#resample data
power_resampled = []
for idx in range(n_mag):
    power_resampled.append(interp(t_mag[idx], power_t, power))
#fit linear to get coefficients
px = []
py = []
pz = []
for idx in range(n_mag):
    px_temp, res_x, _, _, _ = polyfit(power_resampled[idx], magX_body[idx], 1, full=True)
    py_temp, res_y, _, _, _ = polyfit(power_resampled[idx], magY_body[idx], 1, full=True)
    pz_temp, res_z, _, _, _ = polyfit(power_resampled[idx], magZ_body[idx], 1, full=True)
    px.append(px_temp)
    py.append(py_temp)
    pz.append(pz_temp)
#print to console
for idx in range(n_mag):
    print('Mag{} device ID {} (calibration instance {})'.format(idx, mag_id[idx], calibration_instance[idx]))
print('\033[91m \n{}-based compensation: \033[0m'.format(comp_type))
print('\nparam set CAL_MAG_COMP_TYP {}'.format(comp_type_param))
for idx in range(n_mag):
    print('\nparam set CAL_MAG{}_XCOMP {:.3f}'.format(calibration_instance[idx], factor * px[idx][0]))
    print('param set CAL_MAG{}_YCOMP {:.3f}'.format(calibration_instance[idx], factor * py[idx][0]))
    print('param set CAL_MAG{}_ZCOMP {:.3f}'.format(calibration_instance[idx], factor * pz[idx][0]))
#plot data
for idx in range(n_mag):
    fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
    fig.suptitle('Compensation Parameter Fit \n{} \nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
    plt.subplot(1, 3, 1)
    plt.plot(power_resampled[idx], magX_body[idx], 'yo', power_resampled[idx], px[idx][0]*power_resampled[idx]+px[idx][1], '--k')
    plt.xlabel('current [kA]')
    plt.ylabel('mag X [G]')
    plt.subplot(1, 3, 2)
    plt.plot(power_resampled[idx], magY_body[idx], 'yo', power_resampled[idx], py[idx][0]*power_resampled[idx]+py[idx][1], '--k')
    plt.xlabel('current [kA]')
    plt.ylabel('mag Y [G]')
    plt.subplot(1, 3, 3)
    plt.plot(power_resampled[idx], magZ_body[idx], 'yo', power_resampled[idx], pz[idx][0]*power_resampled[idx]+pz[idx][1], '--k')
    plt.xlabel('current [kA]')
    plt.ylabel('mag Z [G]')
    # display results
    plt.figtext(0.24, 0.03, 'CAL_MAG{}_XCOMP: {:.3f} {}'.format(calibration_instance[idx], factor * px[idx][0], unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
    plt.figtext(0.51, 0.03, 'CAL_MAG{}_YCOMP: {:.3f} {}'.format(calibration_instance[idx], factor * py[idx][0], unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
    plt.figtext(0.79, 0.03, 'CAL_MAG{}_ZCOMP: {:.3f} {}'.format(calibration_instance[idx], factor * pz[idx][0], unit), horizontalalignment='center', fontsize=12, multialignment='left', bbox=dict(boxstyle="round", facecolor='#D8D8D8', ec="0.5", pad=0.5, alpha=1), fontweight='bold')
#compensation comparison plots
for idx in range(n_mag):
    fig = plt.figure(num=None, figsize=(25, 14), dpi=80, facecolor='w', edgecolor='k')
    fig.suptitle('Original Data vs. Compensation \n{}\nmag {} ID: {} (calibration instance {})'.format(log_name, idx, mag_id[idx], calibration_instance[idx]), fontsize=14, fontweight='bold')
    plt.subplot(3, 1, 1)
    original_x, = plt.plot(t_mag[idx], magX_body[idx], label='original')
    power_x, = plt.plot(t_mag[idx], magX_body[idx] - px[idx][0] * power_resampled[idx], label='compensated')
    plt.legend(handles=[original_x, power_x])
    plt.xlabel('Time [s]')
    plt.ylabel('Mag X corrected[G]')
    plt.subplot(3, 1, 2)
    original_y, = plt.plot(t_mag[idx], magY_body[idx], label='original')
    power_y, = plt.plot(t_mag[idx], magY_body[idx] - py[idx][0] * power_resampled[idx], label='compensated')
    plt.legend(handles=[original_y, power_y])
    plt.xlabel('Time [s]')
    plt.ylabel('Mag Y corrected[G]')
    plt.subplot(3, 1, 3)
    original_z, = plt.plot(t_mag[idx], magZ_body[idx], label='original')
    power_z, = plt.plot(t_mag[idx], magZ_body[idx] - pz[idx][0] * power_resampled[idx], label='compensated')
    plt.legend(handles=[original_z, power_z])
    plt.xlabel('Time [s]')
    plt.ylabel('Mag Z corrected[G]')
plt.show()
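The helpers `get_data` and `ms2s_list` are truncated out of this row. Their behaviour can be inferred from the call sites; a sketch assuming pyulog's data layout:

```python
def get_data(topic, field, instance):
    # Fetch one field of one multi-instance topic from the loaded ULog.
    for d in log.data_list:
        if d.name == topic and d.multi_id == instance:
            return d.data[field]
    return []


def ms2s_list(timestamps):
    # PX4 log timestamps are in microseconds; convert to seconds.
    return [t / 1e6 for t in timestamps]
```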
| avg_line_length 36.787546 | max_line_length 279 | alphanum_fraction 0.685054 |

9131fc3d32e64957b9b0364448ce5a53a40a4303 | 110 | py | Python | app.py | 19857625778/watchlist @ 284e3f814394d0fda6e262ab84177a493027c19e | ["MIT"] | stars: null | issues: null | forks: null
from flask import Flask
app = Flask(__name__)
| avg_line_length 18.333333 | max_line_length 36 | alphanum_fraction 0.690909 |

913206ffbcd62d973e6003afaac405c6a7ea1d3b | 524 | py | Python | portfolio_optimization/constants.py | AI-Traiding-Team/paired_trading @ 72d4dd0071314e2f0efaa26931ca7339199fc998 | ["MIT"] | stars: 1 (2022-03-26T23:21:51.000Z – 2022-03-26T23:21:51.000Z) | issues: null | forks: 3 (2021-12-07T07:39:43.000Z – 2022-01-24T05:05:55.000Z)
import os
path1 = "outputs"
path2 = "outputs/_imgs"
path3 = "outputs/max_sharpe_weights"
path4 = "outputs/opt_portfolio_trades"
try:
    os.mkdir(path1)
except OSError:
    print("Creation of the directory %s failed" % path1)
else:
    print("Successfully created the directory %s" % path1)

try:
    os.makedirs(path2)
    os.makedirs(path3)
    os.makedirs(path4)
except OSError:
    print("Creation of the output subdirectories failed")
else:
    print("Successfully created the output subdirectories")
source_path = '../source_root/1m'
destination_path = 'outputs'
| avg_line_length 20.153846 | max_line_length 52 | alphanum_fraction 0.704198 |

91329b52b2eb8891b64c02d1b241dca7cd47466e | 26,007 | py | Python | mypy/transformtype.py | silky/mypy @ de6a8d3710df9f49109cb682f2092e4967bfb92c | ["PSF-2.0"] | stars: 1 (2019-06-27T11:34:27.000Z – 2019-06-27T11:34:27.000Z) | issues: null | forks: null
"""Transform classes for runtime type checking."""
from typing import Undefined, List, Set, Any, cast, Tuple, Dict
from mypy.nodes import (
TypeDef, Node, FuncDef, VarDef, Block, Var, ExpressionStmt,
TypeInfo, SuperExpr, NameExpr, CallExpr, MDEF, MemberExpr, ReturnStmt,
AssignmentStmt, TypeExpr, PassStmt, SymbolTableNode
)
from mypy import nodes
from mypy.semanal import self_type
from mypy.types import (
Callable, Instance, Type, AnyType, BOUND_VAR, Void, RuntimeTypeVar,
UnboundType
)
from mypy.checkmember import analyse_member_access
from mypy.checkexpr import type_object_type
from mypy.subtypes import map_instance_to_supertype
import mypy.transform
from mypy.transformfunc import FuncTransformer
from mypy.transutil import (
self_expr, tvar_slot_name, tvar_arg_name, prepend_arg_type
)
from mypy.rttypevars import translate_runtime_type_vars_locally
from mypy.compileslotmap import find_slot_origin
from mypy.coerce import coerce
from mypy.maptypevar import num_slots, get_tvar_access_path
from mypy import erasetype
| avg_line_length 39.285498 | max_line_length 79 | alphanum_fraction 0.560541 |

9133898a696d152066b05333d948605fe60ac07e | 988 | py | Python | jazzpos/admin.py | AhmadManzoor/jazzpos @ 7b771095b8df52d036657f33f36a97efb575d36c | ["MIT"] | stars: 5 (2015-12-05T15:39:51.000Z – 2020-09-16T20:14:29.000Z) | issues: null | forks: 2 (2019-11-23T17:47:46.000Z – 2022-01-14T11:05:21.000Z)
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django_tablib.admin import TablibAdmin
from jazzpos.models import Customer, Patient, Store, CustomerType, StoreSettings
from jazzpos.models import UserProfile
UserAdmin.inlines = [UserProfileInline,]
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Patient, PatientAdmin)
admin.site.register(Store, StoreAdmin)
admin.site.register(StoreSettings, StoreSettingsAdmin)
admin.site.register(CustomerType, CustomerTypeAdmin)
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
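The admin classes registered above are truncated out of this row (in the file they precede the register calls). Minimal stand-ins consistent with the imports; field and format choices are assumptions:

```python
class UserProfileInline(admin.StackedInline):
    model = UserProfile


class CustomerAdmin(TablibAdmin):
    formats = ['csv']


class PatientAdmin(TablibAdmin):
    formats = ['csv']


class StoreAdmin(admin.ModelAdmin):
    pass


class StoreSettingsAdmin(admin.ModelAdmin):
    pass


class CustomerTypeAdmin(admin.ModelAdmin):
    pass
```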
| avg_line_length 26 | max_line_length 80 | alphanum_fraction 0.798583 |

9134284163c48ec784f6cf8bb5ff49c9902c49ec | 2,760 | py | Python | classification/imaterialist_challenge_furniture_2018/configs/train/train_inceptionresnetv2_350_ssd_like_v3.py | vfdev-5/ignite-examples @ fb15b59e2b159e1e2bc4628f8756055e9154f5c8 | ["MIT"] | stars: 11 (2018-04-07T17:49:58.000Z – 2022-03-15T07:18:18.000Z) | issues: null | forks: null
# Basic training configuration file
from torch.optim import RMSprop
from torch.optim.lr_scheduler import MultiStepLR
from torchvision.transforms import RandomHorizontalFlip, Compose
from torchvision.transforms import RandomResizedCrop, RandomAffine, RandomApply
from torchvision.transforms import ColorJitter, ToTensor, Normalize
from common.dataset import FilesFromCsvDataset
from common.data_loaders import get_data_loader
from models.inceptionresnetv2_ssd_like import FurnitureInceptionResNetV4350SSDLike_v3
SEED = 17
DEBUG = True
DEVICE = 'cuda'
OUTPUT_PATH = "output"
size = 350
TRAIN_TRANSFORMS = Compose([
RandomApply(
[RandomAffine(degrees=10, resample=3, fillcolor=(255, 255, 255)), ],
p=0.5
),
RandomResizedCrop(size, scale=(0.7, 1.0), interpolation=3),
RandomHorizontalFlip(p=0.5),
ColorJitter(hue=0.12, brightness=0.12),
ToTensor(),
Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
VAL_TRANSFORMS = TRAIN_TRANSFORMS
BATCH_SIZE = 24
NUM_WORKERS = 15
dataset = FilesFromCsvDataset("output/unique_filtered_train_dataset.csv")
TRAIN_LOADER = get_data_loader(dataset,
data_transform=TRAIN_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory='cuda' in DEVICE)
val_dataset = FilesFromCsvDataset("output/unique_filtered_val_dataset.csv")
VAL_LOADER = get_data_loader(val_dataset,
data_transform=VAL_TRANSFORMS,
batch_size=BATCH_SIZE,
num_workers=NUM_WORKERS,
pin_memory='cuda' in DEVICE)
MODEL = FurnitureInceptionResNetV4350SSDLike_v3(num_classes=128, pretrained='imagenet')
N_EPOCHS = 100
OPTIM = RMSprop(
params=[
{"params": MODEL.extractor.stem.parameters(), 'lr': 0.0001},
{"params": MODEL.extractor.low_features_a.parameters(), 'lr': 0.00045},
{"params": MODEL.extractor.low_features_b.parameters(), 'lr': 0.00045},
{"params": MODEL.extractor.mid_features.parameters(), 'lr': 0.0045},
{"params": MODEL.extractor.top_features.parameters(), 'lr': 0.0045},
{"params": MODEL.extractor.smooth_layers.parameters(), 'lr': 0.045},
{"params": MODEL.cls_layers.parameters(), 'lr': 0.045},
{"params": MODEL.boxes_to_classes.parameters(), 'lr': 0.045},
{"params": MODEL.final_classifier.parameters(), 'lr': 0.045},
],
alpha=0.9,
eps=1.0
)
LR_SCHEDULERS = [
MultiStepLR(OPTIM, milestones=[4, 5, 6, 7, 8, 10, 11, 13, 14, 15], gamma=0.5),
]
EARLY_STOPPING_KWARGS = {
'patience': 25,
# 'score_function': None
}
LOG_INTERVAL = 100
| avg_line_length 30 | max_line_length 87 | alphanum_fraction 0.659058 |

913439b2a09a820bfc3faefc3e105469f128a1a8 | 1,352 | py | Python | examples/qmmm/02-mcscf.py | QuESt-Calculator/pyscf @ 0ed03633b699505c7278f1eb501342667d0aa910 | ["Apache-2.0"] | stars: 501 (2018-12-06T23:48:17.000Z – 2022-03-31T11:53:18.000Z) | issues: 710 (2018-11-26T22:04:52.000Z – 2022-03-30T03:53:12.000Z) | forks: 273 (2018-11-26T10:10:24.000Z – 2022-03-30T12:25:28.000Z)
#!/usr/bin/env python
#
# Author: Qiming Sun <[email protected]>
#
'''
A simple example to run MCSCF with background charges.
'''
import numpy
from pyscf import gto, scf, mcscf, qmmm
mol = gto.M(atom='''
C 1.1879 -0.3829 0.0000
C 0.0000 0.5526 0.0000
O -1.1867 -0.2472 0.0000
H -1.9237 0.3850 0.0000
H 2.0985 0.2306 0.0000
H 1.1184 -1.0093 0.8869
H 1.1184 -1.0093 -0.8869
H -0.0227 1.1812 0.8852
H -0.0227 1.1812 -0.8852
''',
basis='3-21g',
verbose=4)
numpy.random.seed(1)
coords = numpy.random.random((5,3)) * 10
charges = (numpy.arange(5) + 1.) * -.1
#
# There are two ways to add background charges to MCSCF method.
# The recommended one is to initialize it in SCF calculation. The MCSCF
# calculation takes the information from SCF objects.
#
mf = qmmm.mm_charge(scf.RHF(mol), coords, charges).run()
mc = mcscf.CASSCF(mf, 6, 6)
mc.run()
mc = mcscf.CASCI(mf, 6, 6)
mc.run()
#
# The other method is to patch the MCSCF object with the background charges.
# Note: it updates the underlying SCF object inplace.
#
mo_init = mf.mo_coeff
mf = scf.RHF(mol)
mc = mcscf.CASSCF(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
mf = scf.RHF(mol)
mc = mcscf.CASCI(mf, 6, 6)
mc = qmmm.mm_charge(mc, coords, charges)
mc.run(mo_init)
| avg_line_length 22.915254 | max_line_length 76 | alphanum_fraction 0.637574 |

9134f2e89b9311a8b0265758c45e89d220be134a | 5,948 | py | Python | mtp_send_money/apps/send_money/utils.py | uk-gov-mirror/ministryofjustice.money-to-prisoners-send-money @ 80db0cf5f384f93d35387a757605cfddbc98935f | ["MIT"] | stars: null | issues: null | forks: null
import datetime
from decimal import Decimal, ROUND_DOWN, ROUND_UP
import logging
import re
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.utils import formats
from django.utils.cache import patch_cache_control
from django.utils.dateformat import format as format_date
from django.utils.dateparse import parse_date
from django.utils.encoding import force_text
from django.utils.translation import gettext_lazy as _
from django.views.generic import TemplateView
from mtp_common.auth import api_client, urljoin
import requests
from requests.exceptions import Timeout
logger = logging.getLogger('mtp')
prisoner_number_re = re.compile(r'^[a-z]\d\d\d\d[a-z]{2}$', re.IGNORECASE)
def currency_format(amount, trim_empty_pence=False):
    """
    Formats a number into currency format
    @param amount: amount in pounds
    @param trim_empty_pence: if True, strip off .00
    """
    if not isinstance(amount, Decimal):
        amount = unserialise_amount(amount)
    text_amount = serialise_amount(amount)
    if trim_empty_pence and text_amount.endswith('.00'):
        text_amount = text_amount[:-3]
    return '£' + text_amount


def currency_format_pence(amount, trim_empty_pence=False):
    """
    Formats a number into currency format, displaying pence only as #p
    @param amount: amount in pounds
    @param trim_empty_pence: if True, strip off .00
    """
    if not isinstance(amount, Decimal):
        amount = unserialise_amount(amount)
    if amount.__abs__() < Decimal('1'):
        return '%sp' % (amount * Decimal('100')).to_integral_value()
    return currency_format(amount, trim_empty_pence=trim_empty_pence)


def clamp_amount(amount):
    """
    Round the amount to integer pence,
    rounding fractional pence up (away from zero) for any fractional pence value
    that is greater than or equal to a tenth of a penny.
    @param amount: Decimal amount to round
    """
    tenths_of_pennies = (amount * Decimal('1000')).to_integral_value(rounding=ROUND_DOWN)
    pounds = tenths_of_pennies / Decimal('1000')
    return pounds.quantize(Decimal('1.00'), rounding=ROUND_UP)


def make_response_cacheable(response):
    """
    Allow response to be public and cached for an hour
    """
    patch_cache_control(response, public=True, max_age=3600)
    return response
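`clamp_amount` is easier to read with a concrete case: a fraction of a penny at or above a tenth rounds up, anything smaller is dropped. A quick check of that rule:

```python
from decimal import Decimal

assert clamp_amount(Decimal('10.001')) == Decimal('10.01')   # 0.1p rounds up
assert clamp_amount(Decimal('10.0009')) == Decimal('10.00')  # < 0.1p is dropped
```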
| avg_line_length 30.659794 | max_line_length 102 | alphanum_fraction 0.714526 |

913583ff296aa5d8ee63fb484726346d1b7a2c13 | 151 | py | Python | src/zmbrelev/config.py | Zumbi-ML/zmbRELEV @ e6a6f789804d7230415f390da905e94ae2ab27f5 | ["BSD-2-Clause"] | stars: null | issues: null | forks: null
# -*- coding: UTF-8 -*-
import os
this_file_path = os.path.dirname(os.path.realpath(__file__))
MODELS_DIR = os.path.join(this_file_path, "models/")
| avg_line_length 18.875 | max_line_length 60 | alphanum_fraction 0.708609 |

9136160d5624a0c97151f5a92ef4449fe0be2b28 | 1,951 | py | Python | ArraysP2.py | EdgarVallejo96/pyEdureka @ f103f67ed4f9eee6ab924237e9d94a489e602c7c | ["MIT"] | stars: null | issues: null | forks: null
import array as arr
a = arr.array('i', [ 1,2,3,4,5,6])
print(a)
# Accessing elements
print(a[2])
print(a[-2])
# BASIC ARRAY OPERATIONS
# Find length of array
print()
print('Length of array')
print(len(a))
# Adding elments to an array
# append() to add a single element at the end of an array
# extend() to add more than one element at the end of an array
# insert() to add an element at a specific position in an array
print()
# append
print('Append')
a.append(8)
print(a)
# extend
print()
print('Extend')
a.extend([9,8,6,5,4])
print(a)
# insert
print()
print('Insert')
a.insert(2,6) # first param is the index, second param is the value
print(a)
# Removing elements from an array
# pop() Remove an element and return it
# remove() Remove element with a specific value without returning it
print()
print(a)
# pop
print('pop')
print(a.pop()) # removes last element
print(a)
print(a.pop(2))
print(a)
print(a.pop(-1))
print(a)
# remove
print()
print('remove')
print(a.remove(8)) # doesn't return what it removes, it removed the first occurrence of '8'
print(a)
# Array Concatenation
print()
print('Array Concatenation')
b = arr.array('i', [1,2,3,4,5,6,7])
c = arr.array('i', [3,4,2,1,3,5,6,7,8])
d = arr.array('i')
d = b + c
print(d)
# Slicing an Array
print()
print('Slicing an Array') # This means fetching some particular values from an array
print(d)
print(d[0:5]) # Doesn't include the value on the right index
print(d[0:-2])
print(d[::-1]) # Reverse the array; this method is not preferred because it exhausts the memory
# Looping through an Array
print()
print('Looping through an Array')
print('Using for')
for x in d:
    print(x, end=' ')
print()
for x in d[0:-3]:
    print(x, end=' ')
print()
print('Using while')
temp = 0
while temp < d[2]:
    print(d[temp], end=' ')
    temp = temp + 1  # Can use temp += 1, it's the same thing
print()
print(a)
tem = 0
while tem < len(a):
    print(a[tem], end=' ')
    tem += 1
print()
| avg_line_length 18.759615 | max_line_length 95 | alphanum_fraction 0.664787 |

9136706832c51a492458e311e9d6b0efd4abea13 | 2,931 | py | Python | vesper/mpg_ranch/nfc_detector_low_score_classifier_1_0/classifier.py | RichardLitt/Vesper @ 5360844f42a06942e7684121c650b08cf8616285 | ["MIT"] | stars: 29 (2017-07-10T14:49:15.000Z – 2022-02-02T23:14:38.000Z) | issues: 167 (repo Tubbz-alt/Vesper @ 76e5931ca0c7fbe070c53b1362ec246ec9007beb; 2015-03-17T14:45:22.000Z – 2022-03-30T21:00:05.000Z) | forks: 4 (repo Tubbz-alt/Vesper @ 76e5931ca0c7fbe070c53b1362ec246ec9007beb; 2015-02-06T03:30:27.000Z – 2020-12-27T08:38:52.000Z)
"""
Module containing low score classifier for MPG Ranch NFC detectors.
An instance of the `Classifier` class of this module assigns the `LowScore`
classification to a clip if the clip has no `Classification` annotation and
has a `DetectorScore` annotation whose value is less than a threshold.
This classifier is intended for use on clips created by the
MPG Ranch Thrush Detector 1.0 and the MPG Ranch Tseep Detector 1.0.
"""
import logging
from vesper.command.annotator import Annotator
from vesper.django.app.models import AnnotationInfo, StringAnnotation
_logger = logging.getLogger()
_SCORE_THRESHOLDS = {
# For 50 percent precision on validation recordings.
'MPG Ranch Thrush Detector 1.0 40': 70,
'MPG Ranch Tseep Detector 1.0 20': 41,
# For 75 percent precision on validation recordings.
# 'MPG Ranch Thrush Detector 1.0 40': 91,
# 'MPG Ranch Tseep Detector 1.0 20': 63,
}
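The `Classifier` subclass itself is truncated out of this row; the module docstring pins down its behaviour. A sketch of that logic against the imported `Annotator` base, with the lookup helper and method names assumed rather than taken from Vesper:

```python
def _get_annotation_value(clip, annotation_name):
    # Stand-in for Vesper's annotation lookup.
    try:
        info = AnnotationInfo.objects.get(name=annotation_name)
        return StringAnnotation.objects.get(clip=clip, info=info).value
    except (AnnotationInfo.DoesNotExist, StringAnnotation.DoesNotExist):
        return None


class Classifier(Annotator):

    def annotate(self, clip):
        if _get_annotation_value(clip, 'Classification') is not None:
            return False  # already classified; leave the clip alone
        score = _get_annotation_value(clip, 'Detector Score')
        threshold = _SCORE_THRESHOLDS.get(clip.creating_processor.name)
        if score is not None and threshold is not None and float(score) < threshold:
            self._annotate_value(clip, 'LowScore')  # assumed base-class helper
            return True
        return False
```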
| avg_line_length 28.735294 | max_line_length 77 | alphanum_fraction 0.616172 |

91374929866f2c29362313f46503faaf0a90ed51 | 1,506 | py | Python | setup.py | yitzikc/athena2pd @ d2d6b886a70e958f51d90103600572152eaa7bb9 | ["MIT"] | stars: 1 (2020-04-05T18:41:17.000Z – 2020-04-05T18:41:17.000Z) | issues: null | forks: 1 (2021-04-22T09:22:31.000Z – 2021-04-22T09:22:31.000Z)
from setuptools import setup, find_packages
setup(
name='athena2pd',
packages=['athena2pd'],
version=find_version('athena2pd/__init__.py'),
    description='Helps simplify access to databases stored in Amazon Athena by using SQL and pandas DataFrames.',
long_description=get_long_desc('README.md'),
long_description_content_type='text/markdown',
author='Joe Dementri',
maintainer='Joe Dementri',
maintainer_email='[email protected]',
license='MIT',
install_requires=get_requirements('requirements.txt'),
zip_safe=False,
url='https://github.com/joedementri/athena2pd',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent'
],
    python_requires='>=2.7,>=3.6'  # note: PEP 440 commas are ANDed, so this effectively requires >=3.6
)
| 33.466667 | 119 | 0.653386 |
913758ab43b30fce640a96f90c8472af68facfb1
| 117 |
py
|
Python
|
mmdet/core/ufp/__init__.py
|
PuAnysh/UFPMP-Det
|
6809b4f8de3aa1d013a3f86114bc3e8496d896a9
|
[
"Apache-2.0"
] | 9 |
2022-01-18T14:42:39.000Z
|
2022-02-14T02:57:02.000Z
|
mmdet/core/ufp/__init__.py
|
PuAnysh/UFPMP-Det
|
6809b4f8de3aa1d013a3f86114bc3e8496d896a9
|
[
"Apache-2.0"
] | 1 |
2022-03-28T11:51:49.000Z
|
2022-03-31T14:24:02.000Z
|
mmdet/core/ufp/__init__.py
|
PuAnysh/UFPMP-Det
|
6809b4f8de3aa1d013a3f86114bc3e8496d896a9
|
[
"Apache-2.0"
] | null | null | null |
from .spp import *
from .unified_foreground_packing import *
__all__ = [
'phsppog', 'UnifiedForegroundPacking'
]
| 16.714286 | 41 | 0.74359 |
913761b87b7ebbbec82bddc1bdba8144eb580e3d
| 436 |
py
|
Python
|
PythonBasics/ExamPreparation/FamilyTrip.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
PythonBasics/ExamPreparation/FamilyTrip.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
PythonBasics/ExamPreparation/FamilyTrip.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
budget = float(input())
nights = int(input())
price_night = float(input())
percent_extra = int(input())
if nights > 7:
price_night = price_night - (price_night * 0.05)
stay_cost = nights * price_night  # renamed from 'sum' to avoid shadowing the built-in
total_sum = stay_cost + (budget * percent_extra / 100)
if total_sum <= budget:
print(f"Ivanovi will be left with {(budget - total_sum):.2f} leva after vacation.")
else:
print(f"{(total_sum - budget):.2f} leva needed.")
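# Worked example (illustrative input): budget=800, nights=8, price_night=60, extra=10
#   nights > 7, so the nightly price drops 5% to 57.0
#   stay cost: 8 * 57.0 = 456.0; total: 456.0 + 800 * 10 / 100 = 536.0
#   536.0 <= 800 -> "Ivanovi will be left with 264.00 leva after vacation."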
| 29.066667 | 88 | 0.655963 |
91381ad1149218813852e6f68213b5362dda4a67
| 2,573 |
py
|
Python
|
tex_live_package_manager/progress.py
|
csch0/SublimeText-TeX-Live-Package-Manager
|
ab21bd49a945f611250613e9cb862a7703dc534f
|
[
"Unlicense",
"MIT"
] | 2 |
2018-11-03T16:15:59.000Z
|
2018-11-23T16:14:57.000Z
|
tex_live_package_manager/progress.py
|
csch0/SublimeText-TeX-Live-Package-Manager
|
ab21bd49a945f611250613e9cb862a7703dc534f
|
[
"Unlicense",
"MIT"
] | 1 |
2016-12-08T05:39:58.000Z
|
2016-12-08T05:39:58.000Z
|
tex_live_package_manager/progress.py
|
csch0/SublimeText-TeX-Live-Package-Manager
|
ab21bd49a945f611250613e9cb862a7703dc534f
|
[
"Unlicense",
"MIT"
] | null | null | null |
import sublime, sublime_plugin
import threading
def ProgressFunction(function, messages, callback):
t = ThreadThread(function)
t.start()
Progress(t, messages[0], messages[1], callback)
| 23.605505 | 86 | 0.68869 |
91382da4e9ec5e3e22d31caf7faabb09a28c2093
| 10,199 |
py
|
Python
|
moscow_routes_parser/t_mos_ru.py
|
rscprof/moscow_routes_parser
|
692627dd43d62f70e3e12a761897571c79a022a0
|
[
"MIT"
] | null | null | null |
moscow_routes_parser/t_mos_ru.py
|
rscprof/moscow_routes_parser
|
692627dd43d62f70e3e12a761897571c79a022a0
|
[
"MIT"
] | null | null | null |
moscow_routes_parser/t_mos_ru.py
|
rscprof/moscow_routes_parser
|
692627dd43d62f70e3e12a761897571c79a022a0
|
[
"MIT"
] | null | null | null |
import html
import json
import logging
import re
from abc import abstractmethod
from datetime import datetime, time
from typing import Optional
import requests
from moscow_routes_parser.model import Route, Timetable, Equipment, Timetable_builder
from moscow_routes_parser.model_impl import Timetable_builder_t_mos_ru
def get_route(date: datetime.date, id_route_t_mos_ru: str, direction: int,
get_route_url: str = 'https://transport.mos.ru/ru/ajax/App/ScheduleController/getRoute',
parser: parser_timetable = parser_timetable_t_mos_ru(builder=Timetable_builder_t_mos_ru())
) -> Timetable:
"""Get timetable for route by date and direction
:param date: date of timetable for route
:param id_route_t_mos_ru: id of route from t.mos.ru
:param direction: direction for route (0 or 1)
    :param get_route_url: URL for requesting the timetable
    :param parser: parser for the timetable
    :return: timetable for route by date and direction
"""
logger = logging.getLogger(__name__)
try:
# strange problem with SSL Cert in package
response = requests.get(get_route_url,
params={
'mgt_schedule[isNight]': '',
'mgt_schedule[date]': date.strftime("%d.%m.%Y"),
'mgt_schedule[route]': id_route_t_mos_ru,
'mgt_schedule[direction]': direction,
},
headers={'X-Requested-With': 'XMLHttpRequest'}
)
if response.status_code == 200:
logger.info("Get route #{}".format(id_route_t_mos_ru))
route_info = parser.parse(response.text)
else:
logger.error("Error status: {}".format(response.status_code))
route_info = None
except requests.exceptions.RequestException as e:
logger.error("Error " + str(e))
route_info = None
if not (route_info is None):
result = route_info.set_id_route_t_mos_ru(id_route_t_mos_ru).set_direction(direction).set_date(date).build()
if len(result.get_stops()) == 0: # Error of loading timetable without exceptions
result = None
else:
result = None
return result
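# Example call (a sketch: the route id '213' is made up, and a real request
# needs network access to transport.mos.ru):
#
#   from datetime import date
#   timetable = get_route(date.today(), '213', direction=0)
#   if timetable is not None:
#       print(timetable.get_stops())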
def get_list_routes(work_time: int, direction: int,
parser: Parser_routes = None,
get_routes_url: str = 'https://transport.mos.ru/ru/ajax/App/ScheduleController/getRoutesList'
) -> Optional[list[Route]]:
"""get list routes by work_time and direction from transport.mos.ru
    :param parser: parser for the fetched string
    :param get_routes_url: URL for requesting routes
    :param work_time: working day or not (1 or 0)
    :param direction: route direction (0 or 1)
    :return: list of Route
"""
if parser is None:
parser = Parser_routes_t_mos_ru()
page = 1
result_routes = []
finish = False
count = None
logger = logging.getLogger(__name__)
while not finish:
finish = False
repeat = True
while repeat:
repeat = False
try:
# strange problem with SSL Cert in package
response = requests.get(get_routes_url,
params={
'mgt_schedule[search]': '',
'mgt_schedule[isNight]': '',
# 'mgt_schedule[filters]': '',
'mgt_schedule[work_time]': work_time,
'page': page,
'mgt_schedule[direction]': direction,
}
, headers={'X-Requested-With': 'XMLHttpRequest'}
# , headers={'Cookie': "_ym_d=1637468102; _ym_uid=1637468102592825648; mos_id=rBEAAmGaFNawBwAOHRgWAgA=; _ga=GA1.2.1733238845.1637487830; uxs_uid=147e2110-500d-11ec-a7cb-8bb8b12c3186; KFP_DID=ee285837-cd1f-0a9b-c8a2-9cef6a4ee333; _ym_isad=2; _ym_visorc=w"}
)
if response.status_code == 200:
logger.info("Get page #{}".format(page))
routes = parser.parse(response.text)
result_routes += routes
if count is None:
count = parser.count
if not routes:
finish = True
else:
logger.error("Error status: {}".format(response.status_code))
finish = True
page = page + 1
if page > count:
finish = True
except requests.exceptions.RequestException as e:
logger.error("Error " + str(e))
repeat = True
return result_routes
| 42.144628 | 295 | 0.531817 |
913b82f09ffffabfd9cdacbe8830d13b360f655c
| 6,762 |
py
|
Python
|
web/api/get_summary_data.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 2 |
2015-04-11T12:22:41.000Z
|
2016-08-18T11:12:06.000Z
|
web/api/get_summary_data.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 84 |
2015-01-22T14:33:49.000Z
|
2015-04-01T23:15:29.000Z
|
web/api/get_summary_data.py
|
spudmind/spud
|
86e44bca4efd3cd6358467e1511048698a45edbc
|
[
"MIT"
] | 1 |
2015-04-16T03:10:39.000Z
|
2015-04-16T03:10:39.000Z
|
from web.api import BaseAPI
from utils import mongo
import json
| 40.981818 | 102 | 0.584147 |
913c3c69be248515aa6faa8629c29e1819e26c9e
| 21,616 |
py
|
Python
|
neutron/common/ovn/utils.py
|
guillermomolina/neutron
|
bd2933a2588d1e0b18790dd719ca1d89aa4a0c8d
|
[
"Apache-2.0"
] | 3 |
2021-02-17T09:49:14.000Z
|
2022-01-19T08:40:34.000Z
|
neutron/common/ovn/utils.py
|
guillermomolina/neutron
|
bd2933a2588d1e0b18790dd719ca1d89aa4a0c8d
|
[
"Apache-2.0"
] | null | null | null |
neutron/common/ovn/utils.py
|
guillermomolina/neutron
|
bd2933a2588d1e0b18790dd719ca1d89aa4a0c8d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import inspect
import os
import re
import netaddr
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import extra_dhcp_opt as edo_ext
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.api import validators
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from neutron_lib.utils import net as n_utils
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import netutils
from oslo_utils import strutils
from ovsdbapp import constants as ovsdbapp_const
from neutron._i18n import _
from neutron.common.ovn import constants
from neutron.common.ovn import exceptions as ovn_exc
from neutron.db import models_v2
from neutron.objects import ports as ports_obj
LOG = log.getLogger(__name__)
CONF = cfg.CONF
DNS_RESOLVER_FILE = "/etc/resolv.conf"
AddrPairsDiff = collections.namedtuple(
'AddrPairsDiff', ['added', 'removed', 'changed'])
PortExtraDHCPValidation = collections.namedtuple(
'PortExtraDHCPValidation', ['failed', 'invalid_ipv4', 'invalid_ipv6'])
def validate_port_extra_dhcp_opts(port):
"""Validate port's extra DHCP options.
:param port: A neutron port.
:returns: A PortExtraDHCPValidation object.
"""
invalid = {const.IP_VERSION_4: [], const.IP_VERSION_6: []}
failed = False
for edo in port.get(edo_ext.EXTRADHCPOPTS, []):
ip_version = edo['ip_version']
opt_name = edo['opt_name']
# If DHCP is disabled for this port via this special option,
# always succeed the validation
if _is_dhcp_disabled(edo):
failed = False
break
if opt_name not in constants.SUPPORTED_DHCP_OPTS_MAPPING[ip_version]:
invalid[ip_version].append(opt_name)
failed = True
return PortExtraDHCPValidation(
failed=failed,
invalid_ipv4=invalid[const.IP_VERSION_4] if failed else [],
invalid_ipv6=invalid[const.IP_VERSION_6] if failed else [])
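# For instance (an illustrative sketch; 'bogus-opt' is a made-up option name
# assumed absent from SUPPORTED_DHCP_OPTS_MAPPING):
#
#   port = {'extra_dhcp_opts': [
#       {'ip_version': 4, 'opt_name': 'bogus-opt', 'opt_value': '1'}]}
#   validate_port_extra_dhcp_opts(port)
#   # -> PortExtraDHCPValidation(failed=True,
#   #                            invalid_ipv4=['bogus-opt'], invalid_ipv6=[])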
def get_revision_number(resource, resource_type):
"""Get the resource's revision number based on its type."""
if resource_type in (constants.TYPE_NETWORKS,
constants.TYPE_PORTS,
constants.TYPE_SECURITY_GROUP_RULES,
constants.TYPE_ROUTERS,
constants.TYPE_ROUTER_PORTS,
constants.TYPE_SECURITY_GROUPS,
constants.TYPE_FLOATINGIPS, constants.TYPE_SUBNETS):
return resource['revision_number']
else:
raise ovn_exc.UnknownResourceType(resource_type=resource_type)
def remove_macs_from_lsp_addresses(addresses):
    """Remove the MAC addresses from the Logical_Switch_Port addresses column.
    :param addresses: The list of addresses from the Logical_Switch_Port.
                      Example: ["80:fa:5b:06:72:b7 158.36.44.22",
                                "ff:ff:ff:ff:ff:ff 10.0.0.2"]
    :returns: A list of IP addresses (v4 and v6)
"""
ip_list = []
for addr in addresses:
ip_list.extend([x for x in addr.split() if
(netutils.is_valid_ipv4(x) or
netutils.is_valid_ipv6(x))])
return ip_list
def get_allowed_address_pairs_ip_addresses(port):
"""Return a list of IP addresses from port's allowed_address_pairs.
:param port: A neutron port
    :returns: A list of IP addresses (v4 and v6)
"""
return [x['ip_address'] for x in port.get('allowed_address_pairs', [])
if 'ip_address' in x]
def get_allowed_address_pairs_ip_addresses_from_ovn_port(ovn_port):
"""Return a list of IP addresses from ovn port.
    Return a list of IP addresses equivalent to Neutron's port
    allowed_address_pairs column, using the data in the OVN port.
    :param ovn_port: An OVN port
    :returns: A list of IP addresses (v4 and v6)
"""
addresses = remove_macs_from_lsp_addresses(ovn_port.addresses)
port_security = remove_macs_from_lsp_addresses(ovn_port.port_security)
return [x for x in port_security if x not in addresses]
def ovn_metadata_name(id_):
"""Return the OVN metadata name based on an id."""
return 'metadata-%s' % id_
def is_gateway_chassis_invalid(chassis_name, gw_chassis,
physnet, chassis_physnets):
"""Check if gateway chassis is invalid
@param chassis_name: gateway chassis name
@type chassis_name: string
@param gw_chassis: List of gateway chassis in the system
@type gw_chassis: []
@param physnet: physical network associated to chassis_name
@type physnet: string
@param chassis_physnets: Dictionary linking chassis with their physnets
@type chassis_physnets: {}
@return Boolean
"""
if chassis_name == constants.OVN_GATEWAY_INVALID_CHASSIS:
return True
elif chassis_name not in chassis_physnets:
return True
elif physnet and physnet not in chassis_physnets.get(chassis_name):
return True
elif gw_chassis and chassis_name not in gw_chassis:
return True
return False
def is_neutron_dhcp_agent_port(port):
"""Check if the given DHCP port belongs to Neutron DHCP agents
    DHCP ports whose device_id equals 'reserved_dhcp_port' or starts with
    the word 'dhcp' belong to the Neutron DHCP agents.
"""
return (port['device_owner'] == const.DEVICE_OWNER_DHCP and
(port['device_id'] == const.DEVICE_ID_RESERVED_DHCP_PORT or
port['device_id'].startswith('dhcp')))
def compute_address_pairs_diff(ovn_port, neutron_port):
"""Compute the differences in the allowed_address_pairs field."""
ovn_ap = get_allowed_address_pairs_ip_addresses_from_ovn_port(
ovn_port)
neutron_ap = get_allowed_address_pairs_ip_addresses(neutron_port)
added = set(neutron_ap) - set(ovn_ap)
removed = set(ovn_ap) - set(neutron_ap)
return AddrPairsDiff(added, removed, changed=any(added or removed))
def get_ovn_cms_options(chassis):
"""Return the list of CMS options in a Chassis."""
return [opt.strip() for opt in chassis.external_ids.get(
constants.OVN_CMS_OPTIONS, '').split(',')]
def is_gateway_chassis(chassis):
"""Check if the given chassis is a gateway chassis"""
return constants.CMS_OPT_CHASSIS_AS_GW in get_ovn_cms_options(chassis)
def get_port_capabilities(port):
"""Return a list of port's capabilities"""
return port.get(portbindings.PROFILE, {}).get('capabilities', [])
def get_port_id_from_gwc_row(row):
"""Return a port_id from gwc row
The Gateway_Chassis row stores router port_id in
the row name attribute:
<prefix>-<port_id>_<chassis_id>
:param row: A Gateway_Chassis table row.
:returns: String containing router port_id.
"""
return constants.RE_PORT_FROM_GWC.search(row.name).group(2)
def get_chassis_availability_zones(chassis):
"""Return a list of availability zones from a given OVN Chassis."""
azs = set()
if not chassis:
return azs
opt_key = constants.CMS_OPT_AVAILABILITY_ZONES + '='
for opt in get_ovn_cms_options(chassis):
if not opt.startswith(opt_key):
continue
values = opt.split('=')[1]
azs = {az.strip() for az in values.split(':') if az.strip()}
break
return azs
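# e.g. a Chassis whose ovn-cms-options external-id reads
#   "enable-chassis-as-gw,availability-zones=az-0:az-1"
# yields {'az-0', 'az-1'} here (the zone names are illustrative).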
def get_chassis_in_azs(chassis_list, az_list):
"""Return a set of Chassis that belongs to the AZs.
Given a list of Chassis and a list of availability zones (AZs),
return a set of Chassis that belongs to one or more AZs.
:param chassis_list: A list of Chassis objects
:param az_list: A list of availability zones
:returns: A set of Chassis names
"""
chassis = set()
for ch in chassis_list:
chassis_azs = get_chassis_availability_zones(ch)
if chassis_azs.intersection(az_list):
chassis.add(ch.name)
return chassis
def get_gateway_chassis_without_azs(chassis_list):
    """Return a set of Chassis that do not belong to any AZs.
    Filter a list of Chassis and return only the Chassis that do not
    belong to any availability zone.
:param chassis_list: A list of Chassis objects
:returns: A set of Chassis names
"""
return {ch.name for ch in chassis_list if is_gateway_chassis(ch) and not
get_chassis_availability_zones(ch)}
def parse_ovn_lb_port_forwarding(ovn_rtr_lb_pfs):
"""Return a dictionary compatible with port forwarding from OVN lb."""
result = {}
for ovn_lb in ovn_rtr_lb_pfs:
ext_ids = ovn_lb.external_ids
fip_id = ext_ids.get(constants.OVN_FIP_EXT_ID_KEY)
protocol = (ovn_lb.protocol[0]
if ovn_lb.protocol else ovsdbapp_const.PROTO_TCP)
fip_dict = result.get(fip_id, {})
fip_dict_proto = fip_dict.get(protocol, set())
ovn_vips = ovn_lb.vips
for vip, ips in ovn_vips.items():
for ip in ips.split(','):
fip_dict_proto.add("{} {}".format(vip, ip))
fip_dict[protocol] = fip_dict_proto
result[fip_id] = fip_dict
return result
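# A minimal sketch of the row shape this helper consumes, using a
# SimpleNamespace stand-in for an OVN Load_Balancer row (attribute names
# mirror those accessed above; the fip id and addresses are illustrative):
#
#   from types import SimpleNamespace
#   lb = SimpleNamespace(
#       external_ids={constants.OVN_FIP_EXT_ID_KEY: 'fip-1'},
#       protocol=['udp'],
#       vips={'172.24.4.8:53': '10.0.0.10:53,10.0.0.11:53'})
#   parse_ovn_lb_port_forwarding([lb])
#   # -> {'fip-1': {'udp': {'172.24.4.8:53 10.0.0.10:53',
#   #                       '172.24.4.8:53 10.0.0.11:53'}}}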
| 35.320261 | 79 | 0.672141 |
913c83f4f9cee2569debbb5a5301094fbb4ed18e
| 1,823 |
py
|
Python
|
ens/exceptions.py
|
pjryan93/web3.py
|
e066452a7b0e78d6cb8a9462532d169de901ef99
|
[
"MIT"
] | 326 |
2016-04-29T21:51:06.000Z
|
2022-03-31T03:20:54.000Z
|
ens/exceptions.py
|
pjryan93/web3.py
|
e066452a7b0e78d6cb8a9462532d169de901ef99
|
[
"MIT"
] | 283 |
2016-04-15T16:41:31.000Z
|
2017-11-28T16:41:36.000Z
|
ens/exceptions.py
|
pjryan93/web3.py
|
e066452a7b0e78d6cb8a9462532d169de901ef99
|
[
"MIT"
] | 146 |
2016-04-14T16:27:54.000Z
|
2021-10-03T13:31:07.000Z
|
import idna
| 22.7875 | 81 | 0.673066 |
913ca9c4582e3db5d9a5c8dc80fedece649fbdb9
| 1,082 |
py
|
Python
|
Submods/MAS Additions/MASM/scripts/midi_input.py
|
CaptainHorse/MAS-Additions
|
5714aaf8cfa3c57432f6231795cbe1d75df46f74
|
[
"MIT"
] | 13 |
2019-09-24T00:09:17.000Z
|
2022-02-26T20:24:18.000Z
|
Submods/MAS Additions/MASM/scripts/midi_input.py
|
CaptainHorse/MAS-Additions
|
5714aaf8cfa3c57432f6231795cbe1d75df46f74
|
[
"MIT"
] | 30 |
2019-06-28T03:16:33.000Z
|
2022-01-19T11:49:59.000Z
|
Submods/MAS Additions/MASM/scripts/midi_input.py
|
CaptainHorse/MAS-Additions
|
5714aaf8cfa3c57432f6231795cbe1d75df46f74
|
[
"MIT"
] | 4 |
2019-10-04T01:59:17.000Z
|
2022-02-26T20:24:20.000Z
|
import mido
from socketer import MASM
inPort = None
doReadInput = False
| 29.243243 | 99 | 0.677449 |
913cf201ceaa3cdf5791ad85165d65f001d7078a
| 1,896 |
py
|
Python
|
dash_carbon_components/Column.py
|
Matheus-Rangel/dash-carbon-components
|
e3f4aa4a8d649e2740db32677040f2548ef5da48
|
[
"Apache-2.0"
] | 4 |
2021-04-25T22:55:25.000Z
|
2021-12-10T04:52:30.000Z
|
dash_carbon_components/Column.py
|
Matheus-Rangel/dash-carbon-components
|
e3f4aa4a8d649e2740db32677040f2548ef5da48
|
[
"Apache-2.0"
] | null | null | null |
dash_carbon_components/Column.py
|
Matheus-Rangel/dash-carbon-components
|
e3f4aa4a8d649e2740db32677040f2548ef5da48
|
[
"Apache-2.0"
] | null | null | null |
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
| 49.894737 | 196 | 0.681962 |
913e527c83f21ed4118adbad50f5935916d3a9fa
| 2,221 |
py
|
Python
|
src/backend/schemas/vps.py
|
ddddhm1/LuWu
|
f9feaf10a6aca0dd31f250741a1c542ee5256633
|
[
"Apache-2.0"
] | 658 |
2019-04-29T02:46:02.000Z
|
2022-03-30T03:58:42.000Z
|
src/backend/schemas/vps.py
|
ddddhm1/LuWu
|
f9feaf10a6aca0dd31f250741a1c542ee5256633
|
[
"Apache-2.0"
] | 9 |
2020-06-04T13:38:58.000Z
|
2022-02-27T21:23:29.000Z
|
src/backend/schemas/vps.py
|
ddddhm1/LuWu
|
f9feaf10a6aca0dd31f250741a1c542ee5256633
|
[
"Apache-2.0"
] | 130 |
2019-05-02T23:42:58.000Z
|
2022-03-24T04:35:37.000Z
|
from typing import List
from typing import Optional
from typing import Union
from models.vps import VpsStatus
from schemas.base import APIModel
from schemas.base import BasePagination
from schemas.base import BaseSchema
from schemas.base import BaseSuccessfulResponseModel
| 22.663265 | 59 | 0.714093 |
913effe79b3a41e71c6774354a20673cc5bf2cf7
| 672 |
py
|
Python
|
main.py
|
hari-sh/sigplot
|
cd2359d7c868e35ed1d976d7eb8ac35d2dcc7e81
|
[
"MIT"
] | null | null | null |
main.py
|
hari-sh/sigplot
|
cd2359d7c868e35ed1d976d7eb8ac35d2dcc7e81
|
[
"MIT"
] | null | null | null |
main.py
|
hari-sh/sigplot
|
cd2359d7c868e35ed1d976d7eb8ac35d2dcc7e81
|
[
"MIT"
] | null | null | null |
import sigplot as sp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
matplotlib.rcParams['toolbar'] = 'None'
plt.style.use('dark_background')
fig = plt.figure()
# seed = np.linspace(3, 7, 1000)
# a = (np.sin(2 * np.pi * seed))
# b = (np.cos(2 * np.pi * seed))
# sp.correlate(fig, b, a, 300)
t = np.linspace(0, 1, 500)
b = (np.cos(2 * np.pi * t))
# x = np.concatenate([np.zeros(500), signal.sawtooth(2 * np.pi * 5 * t), np.zeros(500), np.ones(120), np.zeros(500)])
x = np.concatenate([np.zeros(500), np.ones(500), np.zeros(500)])
sp.fourier_series(fig, x, 100, 200, 200)
plt.show()
# WriteToVideo("twoPulse.mp4", anim);
| 25.846154 | 118 | 0.623512 |
913f16898807024d65f74b71e35760e3bc3c6dbb
| 149,429 |
py
|
Python
|
test/vanilla/version-tolerant/Expected/AcceptanceTests/UrlVersionTolerant/urlversiontolerant/operations/_operations.py
|
msyyc/autorest.python
|
91aa86f51d5c43c10ead5d51ac102618d23e3a21
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/UrlVersionTolerant/urlversiontolerant/operations/_operations.py
|
msyyc/autorest.python
|
91aa86f51d5c43c10ead5d51ac102618d23e3a21
|
[
"MIT"
] | null | null | null |
test/vanilla/version-tolerant/Expected/AcceptanceTests/UrlVersionTolerant/urlversiontolerant/operations/_operations.py
|
msyyc/autorest.python
|
91aa86f51d5c43c10ead5d51ac102618d23e3a21
|
[
"MIT"
] | 1 |
2022-03-28T08:58:03.000Z
|
2022-03-28T08:58:03.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .._vendor import _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
| 41.233168 | 245 | 0.675491 |
913f9ce1958e1ba194c9448681b6fa2b1b835522
| 1,668 |
py
|
Python
|
baseplate_py_upgrader/docker.py
|
reddit/baseplate.py-upgrader
|
2e4b019de7c22e2d2467eba488867fe81d7d5fc1
|
[
"BSD-3-Clause"
] | 6 |
2020-07-09T02:25:23.000Z
|
2021-09-24T17:28:41.000Z
|
baseplate_py_upgrader/docker.py
|
Seanpm2001-reddit/baseplate.py-upgrader
|
a554418c638022b461cf5cae17e894280cf76a25
|
[
"BSD-3-Clause"
] | 9 |
2019-08-13T20:29:04.000Z
|
2022-03-04T19:11:47.000Z
|
baseplate_py_upgrader/docker.py
|
Seanpm2001-reddit/baseplate.py-upgrader
|
a554418c638022b461cf5cae17e894280cf76a25
|
[
"BSD-3-Clause"
] | 4 |
2020-12-11T21:59:37.000Z
|
2022-03-04T00:10:43.000Z
|
import logging
import re
from pathlib import Path
from typing import Match
logger = logging.getLogger(__name__)
IMAGE_RE = re.compile(
r"/baseplate-py:(?P<version>[0-9.]+(\.[0-9]+)?)-py(?P<python>[23]\.[0-9]+)-(?P<distro>(bionic|buster))(?P<repo>-artifactory)?(?P<dev>-dev)?"
)
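# For example (the image tag below is made up for illustration):
#
#   m = IMAGE_RE.search("example.com/baseplate-py:1.5-py3.7-bionic-dev")
#   m.group("version"), m.group("python"), m.group("distro"), m.group("dev")
#   # -> ('1.5', '3.7', 'bionic', '-dev')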
| 30.327273 | 144 | 0.658873 |
913fb3fc99b72d4e97ce88b0037ce6490e6db9c1
| 1,249 |
py
|
Python
|
model/swtz_ty.py
|
ArcherLuo233/election-s-prediction
|
9da72cb855f6d61f9cdec6e15f7ca832629ba51a
|
[
"MIT"
] | null | null | null |
model/swtz_ty.py
|
ArcherLuo233/election-s-prediction
|
9da72cb855f6d61f9cdec6e15f7ca832629ba51a
|
[
"MIT"
] | 1 |
2022-01-26T01:23:26.000Z
|
2022-01-26T01:23:34.000Z
|
model/swtz_ty.py
|
ArcherLuo233/election-s-prediction
|
9da72cb855f6d61f9cdec6e15f7ca832629ba51a
|
[
"MIT"
] | 1 |
2021-11-08T10:58:23.000Z
|
2021-11-08T10:58:23.000Z
|
from sqlalchemy import Column, ForeignKey, Integer, String, Text
from model.base import Base
| 26.020833 | 73 | 0.566853 |
913fde7505a4c384507f28eb2cee97a556b8c075
| 3,515 |
py
|
Python
|
amy/dashboard/tests/test_autoupdate_profile.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 53 |
2015-01-10T17:39:19.000Z
|
2019-06-12T17:36:34.000Z
|
amy/dashboard/tests/test_autoupdate_profile.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 1,176 |
2015-01-02T06:32:47.000Z
|
2019-06-18T11:57:47.000Z
|
amy/dashboard/tests/test_autoupdate_profile.py
|
code-review-doctor/amy
|
268c1a199510457891459f3ddd73fcce7fe2b974
|
[
"MIT"
] | 44 |
2015-01-03T15:08:56.000Z
|
2019-06-09T05:33:08.000Z
|
from django.urls import reverse
from consents.models import Consent, Term
from workshops.models import KnowledgeDomain, Person, Qualification
from workshops.tests.base import TestBase
| 34.80198 | 83 | 0.586629 |
9140f295d54089cb5cee0de94bb54febfe097979
| 4,823 |
py
|
Python
|
bot/recognizer_bot/yolo/common/utils.py
|
kprokofi/animal-recognition-with-voice
|
e9e5235315255eb6e17df3dba616b2ed4c902c92
|
[
"MIT"
] | 1 |
2021-03-18T05:51:10.000Z
|
2021-03-18T05:51:10.000Z
|
bot/recognizer_bot/yolo/common/utils.py
|
kprokofi/animal-recognition-with-voice
|
e9e5235315255eb6e17df3dba616b2ed4c902c92
|
[
"MIT"
] | 3 |
2021-04-11T20:52:44.000Z
|
2021-06-13T13:46:08.000Z
|
bot/recognizer_bot/yolo/common/utils.py
|
kprokofi/animal-recognition-with-voice
|
e9e5235315255eb6e17df3dba616b2ed4c902c92
|
[
"MIT"
] | null | null | null |
import numpy as np
import time
import cv2
import colorsys
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation, ReLU, Multiply
# Custom objects from backbones package https://github.com/david8862/keras-YOLOv3-model-set/tree/master/common/backbones
def swish(x):
"""Swish activation function.
# Arguments
x: Input tensor.
# Returns
The Swish activation: `x * sigmoid(x)`.
# References
[Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
if K.backend() == 'tensorflow':
try:
# The native TF implementation has a more
# memory-efficient gradient implementation
return K.tf.nn.swish(x)
except AttributeError:
pass
return x * K.sigmoid(x)
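# The three activations referenced in get_custom_objects() below were elided
# from this snippet. These are the standard definitions (hard sigmoid/swish
# from MobileNetV3, Mish from Misra 2019) written with the layers imported
# above -- a reconstruction, not necessarily the verbatim upstream code.
def hard_sigmoid(x):
    return ReLU(6.)(x + 3.) * (1. / 6.)


def hard_swish(x):
    return Multiply()([Activation(hard_sigmoid)(x), x])


def mish(x):
    return x * K.tanh(K.softplus(x))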
def get_custom_objects():
'''
form up a custom_objects dict so that the customized
layer/function call could be correctly parsed when keras
.h5 model is loading or converting
'''
custom_objects_dict = {
'tf': tf,
'swish': swish,
'hard_sigmoid': hard_sigmoid,
'hard_swish': hard_swish,
'mish': mish
}
return custom_objects_dict
def resize_anchors(base_anchors, target_shape, base_shape=(416, 416)):
'''
original anchor size is clustered from COCO dataset
under input shape (416,416). We need to resize it to
our train input shape for better performance
'''
return np.around(base_anchors*target_shape[::-1]/base_shape[::-1])
def get_classes(classes_path):
'''loads the classes'''
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def get_anchors(anchors_path):
'''loads the anchors from a file'''
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
| 28.708333 | 120 | 0.618495 |
9142c85805b0c1a34b37861799a56fa0542af061
| 2,706 |
py
|
Python
|
ros/src/tl_detector/light_classification/tl_classifier.py
|
PhilippHafe/CarND-Capstone
|
9f933c817b11e7a093c3f2b07fad10710f7eb551
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/tl_classifier.py
|
PhilippHafe/CarND-Capstone
|
9f933c817b11e7a093c3f2b07fad10710f7eb551
|
[
"MIT"
] | null | null | null |
ros/src/tl_detector/light_classification/tl_classifier.py
|
PhilippHafe/CarND-Capstone
|
9f933c817b11e7a093c3f2b07fad10710f7eb551
|
[
"MIT"
] | null | null | null |
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import datetime
| 39.217391 | 109 | 0.631929 |
9143774e616443b37cd584d3970647098c72f10f
| 16,563 |
py
|
Python
|
testGMDS.py
|
ctralie/SiRPyGL
|
e06c317ed60321d492725e39fd8fcc0ce56ff4c0
|
[
"Apache-2.0"
] | 7 |
2017-10-06T05:33:28.000Z
|
2021-04-20T20:06:53.000Z
|
testGMDS.py
|
ctralie/SiRPyGL
|
e06c317ed60321d492725e39fd8fcc0ce56ff4c0
|
[
"Apache-2.0"
] | null | null | null |
testGMDS.py
|
ctralie/SiRPyGL
|
e06c317ed60321d492725e39fd8fcc0ce56ff4c0
|
[
"Apache-2.0"
] | 4 |
2015-03-20T13:14:36.000Z
|
2019-04-19T10:34:51.000Z
|
#Based off of http://wiki.wxpython.org/GLCanvas
#Lots of help from http://wiki.wxpython.org/Getting%20Started
from OpenGL.GL import *
import wx
from wx import glcanvas
from Primitives3D import *
from PolyMesh import *
from LaplacianMesh import *
from Geodesics import *
from PointCloud import *
from Cameras3D import *
from ICP import *
from sys import exit, argv
import random
import numpy as np
import scipy.io as sio
from pylab import cm
import os
import subprocess
import math
import time
#from sklearn import manifold
from GMDS import *
DEFAULT_SIZE = wx.Size(1200, 800)
DEFAULT_POS = wx.Point(10, 10)
PRINCIPAL_AXES_SCALEFACTOR = 1
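# The MeshViewer class (a wxPython + OpenGL viewer that drives the GMDS
# embedding demo, judging from the imports above) was elided from this
# snippet; only the module constants and the entry point below remain.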
if __name__ == '__main__':
m1 = None
m2 = None
if len(argv) >= 3:
m1 = argv[1]
m2 = argv[2]
viewer = MeshViewer(m1, m2)
| 34.010267 | 152 | 0.71871 |
91439c7735cd8dec720dbcbb904a5ff89db7c69f
| 17,382 |
py
|
Python
|
PySS/fem.py
|
manpan-1/PySS
|
1e4b13de3b2aed13ecf9818f9084a2fedb295cf1
|
[
"MIT"
] | 2 |
2018-12-03T13:53:00.000Z
|
2019-10-20T14:30:57.000Z
|
PySS/fem.py
|
manpan-1/PySS
|
1e4b13de3b2aed13ecf9818f9084a2fedb295cf1
|
[
"MIT"
] | null | null | null |
PySS/fem.py
|
manpan-1/PySS
|
1e4b13de3b2aed13ecf9818f9084a2fedb295cf1
|
[
"MIT"
] | 1 |
2018-03-23T19:58:21.000Z
|
2018-03-23T19:58:21.000Z
|
import matplotlib.pyplot as plt
import numpy as np
import pickle
# import csv
# from collections import namedtuple
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.animation as animation
# import matplotlib.colors as mc
#
# class ParametricDB:
# def __init__(self, dimensions, responses):
# self.responses = responses
# self.dimensions = dimensions
#
# @classmethod
# def from_file(cls, filename):
# """
# Create from file.
#
# The file should be comma separated, first row titles, subsequent rows only numbers.
#
# Parameters
# ----------
# filename : str
# Relative path/filename.
#
# Return
# ------
# ParametricDB
#
# """
# # with open(filename, 'rU') as infile:
# # reader = csv.reader(infile)
# # n_dim = int(next(reader)[0].split()[0])
# # db = {c[0]: c[1:] for c in zip(*reader)}
#
# with open(filename, 'rU') as infile:
# reader = csv.reader(infile, delimiter=";")
# n_dim = int(next(reader)[0].split()[0])
# db = [c for c in zip(*reader)]
#
# all_responses = {i[0]: i[1:] for i in db[n_dim:]}
#
# dim_ticks = np.array([i[1:] for i in db[:n_dim]]).T
# dim_lengths = [len(set(dim_ticks[:, i])) for i in range(n_dim)]
# dim_names = [db[i][0] for i in range(n_dim)]
#
# # with open(filename, 'r') as infile:
# # all_lines = [[c.split(sep=":")[0]] + c.split(sep=":")[1].split(sep=",") for c in infile]
# # db = {c[0]: c[1:] for c in zip(*all_lines)}
#
# # for key in db.keys():
# # if len(key.split(",")) > 1:
# # n_dim = len(key.split(","))
# # dim_str = key
# # dim_ticks = np.array([c.split(sep=",") for c in db[dim_str]])
# # dim_lengths = [len(set(dim_ticks[:, i])) for i in range(n_dim)]
# # dim_names = dim_str.split(sep=",")
# full_list = {i[0]: i[1:][0] for i in zip(dim_names, dim_ticks.T)}
#
# # del db[dim_str]
#
# #df = pd.DataFrame(full_dict)
#
# Address = namedtuple("map", " ".join(dim_names))
# args = [tuple(sorted(set(dim_ticks[:, i]))) for i, j in enumerate(dim_names)]
# addressbook = Address(*args)
#
# mtx = {i: np.empty(dim_lengths) for i in all_responses.keys()}
# for response in all_responses.keys():
# for i, response_value in enumerate(all_responses[response]):
# current_idx = tuple(addressbook[idx].index(full_list[name][i]) for idx, name in enumerate(dim_names))
# mtx[response][current_idx] = response_value
# mtx[response].flags.writeable = False
#
# return cls(addressbook, mtx)
#
# def get_slice(self, slice_at, response):
# """
# Get a slice of the database.
#
# Parameters
# ----------
# slice_at : dict of int
# A dictionary of the keys to be sliced at the assigned values.
# response : str
# The name of the requested response to be sliced.
#
# """
#
# idx_arr = [0]*len(self.dimensions)
#
# for key in self.dimensions._fields:
# if key not in slice_at.keys():
# idx_arr[self.get_idx(key)] = slice(None, None)
# for name, value in zip(slice_at.keys(), slice_at.values()):
# idx_arr[self.get_idx(name)] = value
#
# return self.responses[response][idx_arr]
#
# def get_idx(self, attrname):
# """
# Get the index number of a parameter (dimension) in the database.
#
# Parameters
# ----------
# attrname : str
#
# """
# return(self.dimensions.index(self.dimensions.__getattribute__(attrname)))
#
# def contour_2d(self, slice_at, response, transpose=False, fig=None, sbplt=None):
# """
# Contour plot.
# :param slice_at:
# :return:
# """
# plt.rc('text', usetex=True)
# if fig is None:
# fig = plt.figure()
# if sbplt is None:
# ax = fig.add_subplot(111)
# else:
# ax = fig.add_subplot(sbplt)
# else:
# if sbplt is None:
# ax = fig.add_subplot(111)
# else:
# ax = fig.add_subplot(sbplt)
#
# axes = [key for key in self.dimensions._fields if key not in slice_at.keys()]
#
# if transpose:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[1])], self.dimensions[self.get_idx(axes[0])])
# Z = self.get_slice(slice_at, response).T
# x_label, y_label = axes[1], axes[0]
# else:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[0])], self.dimensions[self.get_idx(axes[1])])
# Z = self.get_slice(slice_at, response)
# x_label, y_label = axes[0], axes[1]
#
# ttl_values = [self.dimensions[self.get_idx(i)][slice_at[i]] for i in slice_at.keys()]
#
# # levels = np.arange(0, 2., 0.025)
# # sbplt = ax.contour(X.astype(np.float), Y.astype(np.float), Z.T, vmin=0.4, vmax=1., levels=levels, cmap=plt.cm.inferno)
# sbplt = ax.contour(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.gray_r)
# sbplt2 = ax.contourf(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.inferno)
# plt.clabel(sbplt, inline=1, fontsize=10)
# ttl = [i for i in zip(slice_at.keys(), ttl_values)]
# ttl = ", ".join(["=".join(i) for i in ttl])
# ax.set_title("$" + response + "$" + " for : " + "$" + ttl + "$")
# ax.set_xlabel("$"+x_label+"$")
# ax.set_ylabel("$"+y_label+"$")
#
# return fig
#
# def surf_3d(self, slice_at, response, transpose=False, fig=None, sbplt=None):
# """
# Surface plot.
# :param slice_at:
# :return:
# """
# #Convenient window dimensions
# # one subplot:
# # 2 side by side: Bbox(x0=0.0, y0=0.0, x1=6.79, y1=2.57)
# # azim elev = -160 30
# # 3 subplots side by side
# # 4 subplots: Bbox(x0=0.0, y0=0.0, x1=6.43, y1=5.14)
# #azim elev -160 30
# plt.rc('text', usetex=True)
# if fig is None:
# fig = plt.figure()
# if sbplt is None:
# ax = fig.add_subplot(111, projection='3d')
# else:
# ax = fig.add_subplot(sbplt, projection='3d')
# else:
# if sbplt is None:
# ax = fig.add_subplot(111, projection='3d')
# else:
# ax = fig.add_subplot(sbplt, projection='3d')
#
#
# axes = [key for key in self.dimensions._fields if key not in slice_at.keys()]
#
# if transpose:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[1])], self.dimensions[self.get_idx(axes[0])])
# Z = self.get_slice(slice_at, response).T
# x_label, y_label = axes[1], axes[0]
# else:
# X, Y = np.meshgrid(self.dimensions[self.get_idx(axes[0])], self.dimensions[self.get_idx(axes[1])])
# Z = self.get_slice(slice_at, response)
# x_label, y_label = axes[0], axes[1]
#
# ttl_values = [self.dimensions[self.get_idx(i)][slice_at[i]] for i in slice_at.keys()]
#
# sbplt = ax.plot_surface(X.astype(np.float), Y.astype(np.float), Z.T, cmap=plt.cm.inferno)
# # plt.clabel(sbplt, inline=1, fontsize=10)
# ttl = [i for i in zip(slice_at.keys(), ttl_values)]
# ttl = ", ".join(["=".join(i) for i in ttl])
# ax.set_title("$" + response + "$" + " for : " + "$" + ttl + "$")
# ax.set_xlabel("$"+x_label+"$")
# ax.set_ylabel("$"+y_label+"$")
#
# return fig
#
# def match_viewports(fig=None):
# if fig is None:
# fig = plt.gcf()
# fig.axes[1].view_init(azim=fig.axes[0].azim, elev=fig.axes[0].elev)
def main():
lambda01 = ParametricDB.from_file("data/fem/fem-results_lambda01.dat")
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 355 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 700 MPa, lambda_flex: 0.1")
lambda01.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda01.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda01.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda01.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda01.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda01.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 2])
lambda02 = ParametricDB.from_file("data/fem/fem-results-lambda02.dat")
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 355 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 0}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcA, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 0, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcB, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 1, "f_yield": 1}, "lpf", ax=ax[1, 2])
fig, ax = plt.subplots(nrows=2, ncols=3)
fig.suptitle("fab_class: fcC, f_yield: 700 MPa, lambda_flex: 0.2")
lambda02.contour_2d({"plate_imp": 0, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 0])
lambda02.contour_2d({"plate_imp": 1, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 1])
lambda02.contour_2d({"plate_imp": 2, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[0, 2])
lambda02.contour_2d({"plate_imp": 3, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 0])
lambda02.contour_2d({"plate_imp": 4, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 1])
lambda02.contour_2d({"plate_imp": 5, "fab_class": 2, "f_yield": 1}, "lpf", ax=ax[1, 2])
return
| 47.884298 | 130 | 0.56921 |
9143b8c633adb2c76477406a889fd2a426c5cda8
| 278 |
py
|
Python
|
gigamonkeys/get.py
|
gigamonkey/sheets
|
a89e76360ad9a35e44e5e352346eeccbe6952b1f
|
[
"BSD-3-Clause"
] | null | null | null |
gigamonkeys/get.py
|
gigamonkey/sheets
|
a89e76360ad9a35e44e5e352346eeccbe6952b1f
|
[
"BSD-3-Clause"
] | 1 |
2021-04-03T23:07:35.000Z
|
2021-04-03T23:07:35.000Z
|
gigamonkeys/get.py
|
gigamonkey/sheets
|
a89e76360ad9a35e44e5e352346eeccbe6952b1f
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import json
import sys
from gigamonkeys.spreadsheets import spreadsheets
spreadsheet_id = sys.argv[1]
ranges = sys.argv[2:]
data = spreadsheets().get(spreadsheet_id, include_grid_data=bool(ranges), ranges=ranges)
json.dump(data, sys.stdout, indent=2)
| 19.857143 | 88 | 0.773381 |
9143ca3c30c3da5376a215dd32db4d9aec05c6ba
| 282 |
py
|
Python
|
config.py
|
mhmddpkts/Get-Turkish-Words-with-Web-Scraping
|
6e344640f6dc512f03a9b59522876ce7b6339a86
|
[
"MIT"
] | null | null | null |
config.py
|
mhmddpkts/Get-Turkish-Words-with-Web-Scraping
|
6e344640f6dc512f03a9b59522876ce7b6339a86
|
[
"MIT"
] | null | null | null |
config.py
|
mhmddpkts/Get-Turkish-Words-with-Web-Scraping
|
6e344640f6dc512f03a9b59522876ce7b6339a86
|
[
"MIT"
] | null | null | null |
root_URL = "https://tr.wiktionary.org/wiki/Vikis%C3%B6zl%C3%BCk:S%C3%B6zc%C3%BCk_listesi_"
filepath = "words.csv"
#letters=["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O",
#          "P","R","S","T","U","V","Y","Z"] ## the Turkish letters (likely Ç, Ğ, İ, Ö, Ş, Ü, garbled here) do not work correctly
letters=["C"]
| 35.25 | 90 | 0.539007 |
91469ce6ec9fde95e8590b13e1386757a2494a57
| 1,374 |
py
|
Python
|
sow_generator/tasks.py
|
praekelt/sow-generator
|
eb5dab3b3231688966254a1797ced7eec67b6e8a
|
[
"BSD-3-Clause"
] | 1 |
2016-04-14T08:34:48.000Z
|
2016-04-14T08:34:48.000Z
|
sow_generator/tasks.py
|
praekelt/sow-generator
|
eb5dab3b3231688966254a1797ced7eec67b6e8a
|
[
"BSD-3-Clause"
] | null | null | null |
sow_generator/tasks.py
|
praekelt/sow-generator
|
eb5dab3b3231688966254a1797ced7eec67b6e8a
|
[
"BSD-3-Clause"
] | null | null | null |
from github3 import login
from github3.models import GitHubError
from celery import task
from celery.decorators import periodic_task
from celery.task.schedules import crontab
from sow_generator.models import Repository, AuthToken
| 29.869565 | 72 | 0.621543 |
9146c7949d8b05d057e0f629fb324a047f0358c0
| 6,145 |
py
|
Python
|
sources/wrappers.py
|
X-rayLaser/keras-auto-hwr
|
67cfc0209045b1e211f0491b0199cb9d6811bfd0
|
[
"MIT"
] | null | null | null |
sources/wrappers.py
|
X-rayLaser/keras-auto-hwr
|
67cfc0209045b1e211f0491b0199cb9d6811bfd0
|
[
"MIT"
] | 2 |
2020-01-04T09:03:31.000Z
|
2021-05-10T18:29:41.000Z
|
sources/wrappers.py
|
X-rayLaser/keras-auto-hwr
|
67cfc0209045b1e211f0491b0199cb9d6811bfd0
|
[
"MIT"
] | null | null | null |
import numpy as np
from sources import BaseSource
from sources.base import BaseSourceWrapper
from sources.preloaded import PreLoadedSource
import json
# NOTE: preprocess() references self._mu and self._sd, so it appears to have
# been separated from one of the source classes below during extraction; its
# original home is not recoverable from this snippet.
def preprocess(self, X):
    res = []
    for x in X:
        x_norm = (x - self._mu) / self._sd
        # we do not want to normalize the END-OF-STROKE flag, which is last in the tuple
        x_norm[:, -1] = np.array(x)[:, -1]
        res.append(x_norm.tolist())
    return res


# The class bodies were elided from this snippet; '...' keeps the module importable.
class OffsetPointsSource(BaseSource): ...
class NormalizedSource(BaseSource): ...
class DenormalizedSource(BaseSource): ...
class H5pySource(BaseSource): ...
class PreprocessedSource(BaseSourceWrapper): ...
class ConstrainedSource(BaseSourceWrapper): ...
class PlainListSource(BaseSourceWrapper): ...
| 26.038136 | 87 | 0.588771 |
91472db15a8c58afa56167fc11db5c1a1643924e
| 10,956 |
py
|
Python
|
multiworld/multiworld/core/image_env.py
|
yufeiwang63/ROLL
|
aba0b4530934946eb9c41fbe5a0d6c27775596ff
|
[
"MIT"
] | 11 |
2020-11-04T03:15:27.000Z
|
2021-11-25T16:00:41.000Z
|
multiworld/multiworld/core/image_env.py
|
yufeiwang63/ROLL
|
aba0b4530934946eb9c41fbe5a0d6c27775596ff
|
[
"MIT"
] | null | null | null |
multiworld/multiworld/core/image_env.py
|
yufeiwang63/ROLL
|
aba0b4530934946eb9c41fbe5a0d6c27775596ff
|
[
"MIT"
] | 3 |
2020-11-19T14:16:56.000Z
|
2021-11-25T16:01:13.000Z
|
import random
import cv2
import numpy as np
import warnings
from PIL import Image
from gym.spaces import Box, Dict
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.core.wrapper_env import ProxyEnv
from multiworld.envs.env_util import concatenate_box_spaces
from multiworld.envs.env_util import get_stat_in_paths, create_stats_ordered_dict
| 38.174216 | 104 | 0.621486 |
91487dc34ce39dcba03a9475df5437871d95ebe4
| 2,546 |
py
|
Python
|
sample_full_post_processor.py
|
huynguyen82/Modified-Kaldi-GStream-OnlineServer
|
e7429a5e44b9567b603523c0046fb42d8503a275
|
[
"BSD-2-Clause"
] | null | null | null |
sample_full_post_processor.py
|
huynguyen82/Modified-Kaldi-GStream-OnlineServer
|
e7429a5e44b9567b603523c0046fb42d8503a275
|
[
"BSD-2-Clause"
] | 1 |
2021-03-25T23:17:23.000Z
|
2021-03-25T23:17:23.000Z
|
sample_full_post_processor.py
|
huynguyen82/Modified-Kaldi-GStream-OnlineServer
|
e7429a5e44b9567b603523c0046fb42d8503a275
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys
import json
import logging
from math import exp
import requests as rq
import re
### For NLP post-processing
header={"Content-Type": "application/json"}
message='{"sample":"Hello bigdata"}'
api_url="http://192.168.1.197:11992/norm"
###
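# post_process_json() is elided from this snippet; judging from the config
# above, it presumably POSTs the decoded text to api_url with `header` and
# returns the normalized JSON string.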
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format="%(levelname)8s %(asctime)s %(message)s ")
lines = []
while True:
l = sys.stdin.readline()
if not l: break # EOF
if l.strip() == "":
if len(lines) > 0:
result_json = post_process_json("".join(lines))
print result_json
print
sys.stdout.flush()
lines = []
else:
lines.append(l)
if len(lines) > 0:
result_json = post_process_json("".join(lines))
print result_json
lines = []
| 34.405405 | 94 | 0.565593 |
914899652debcd6bf278b6bcd59488d3ca01a934
| 349 |
py
|
Python
|
lang_detect_gears.py
|
AlexMikhalev/cord19redisknowledgegraph
|
a143415aca8d4a6db820dc7a25280045f421a665
|
[
"Apache-2.0"
] | 7 |
2020-05-18T09:25:17.000Z
|
2021-08-05T00:23:36.000Z
|
lang_detect_gears.py
|
maraqa1/CORD-19
|
a473f7b60b8dfa476ea46505678481e4b361d04e
|
[
"Apache-2.0"
] | 10 |
2020-05-31T14:44:26.000Z
|
2022-03-25T19:17:37.000Z
|
lang_detect_gears.py
|
maraqa1/CORD-19
|
a473f7b60b8dfa476ea46505678481e4b361d04e
|
[
"Apache-2.0"
] | null | null | null |
from langdetect import detect
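# GB (GearsBuilder) is injected by the RedisGears runtime rather than
# imported; detect_language() is elided from this snippet and presumably
# wraps langdetect.detect() over each record's text field.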
gb = GB()
gb.foreach(detect_language)
gb.run('title:*')
| 23.266667 | 52 | 0.60745 |
91492cd2d90ac485784d8d45eca57302464591f8
| 21,084 |
py
|
Python
|
daemon/core/coreobj.py
|
shanv82/core
|
70abb8cc1426ffceb53a03e84edc26f56f9ed4c0
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/coreobj.py
|
shanv82/core
|
70abb8cc1426ffceb53a03e84edc26f56f9ed4c0
|
[
"BSD-2-Clause"
] | null | null | null |
daemon/core/coreobj.py
|
shanv82/core
|
70abb8cc1426ffceb53a03e84edc26f56f9ed4c0
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Defines the basic objects for CORE emulation: the PyCoreObj base class, along with PyCoreNode,
PyCoreNet, and PyCoreNetIf.
"""
import os
import shutil
import socket
import threading
from socket import AF_INET
from socket import AF_INET6
from core.data import NodeData, LinkData
from core.enumerations import LinkTypes
from core.misc import ipaddress
| 27.852048 | 100 | 0.558338 |
914969a6475944053d8a15e1118e2d12ecdc9855
| 349 |
py
|
Python
|
abc/128/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | 3 |
2019-06-25T06:17:38.000Z
|
2019-07-13T15:18:51.000Z
|
abc/128/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | null | null | null |
abc/128/b.py
|
wotsushi/competitive-programming
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
[
"MIT"
] | null | null | null |
# Sort restaurants by city name (ascending), then score (descending); print their original 1-based indices
N = int(input())
S, P = (
zip(*(
(s, int(p))
for s, p in (input().split() for _ in range(N))
)) if N else
((), ())
)
ans = '\n'.join(
str(i)
for _, _, i in sorted(
zip(
S,
P,
range(1, N + 1)
),
key=lambda t: (t[0], -t[1])
)
)
#
print(ans)
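# Worked example (illustrative input):
#   6
#   khabarovsk 20 / moscow 10 / kazan 50 / kazan 35 / moscow 60 / khabarovsk 40
# cities sort ascending (kazan before khabarovsk before moscow) and scores
# descend within each city, so the printed indices are 3 4 6 1 5 2, one per line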
| 13.96 | 55 | 0.34384 |
914b520c0a97da68019f1f6058aa11f3ec987d8a
| 1,915 |
py
|
Python
|
additional/hashcat_crack.py
|
mmmds/WirelessDiscoverCrackScan
|
2eda9bd7c474d91ea08511a7322f5ba14d034f3d
|
[
"MIT"
] | 2 |
2020-02-09T15:35:05.000Z
|
2020-04-15T10:01:24.000Z
|
additional/hashcat_crack.py
|
mmmds/WirelessDiscoverCrackScan
|
2eda9bd7c474d91ea08511a7322f5ba14d034f3d
|
[
"MIT"
] | null | null | null |
additional/hashcat_crack.py
|
mmmds/WirelessDiscoverCrackScan
|
2eda9bd7c474d91ea08511a7322f5ba14d034f3d
|
[
"MIT"
] | null | null | null |
# External cracking script, part of https://github.com/mmmds/WirelessDiscoverCrackScan
import datetime
import subprocess
import os
### CONFIGURATION
HASHCAT_DIR = "C:\\hashcat-5.1.0"
HASHCAT_EXE = "hashcat64.exe"
LOG_FILE = "crack_log.txt"
DICT_DIR = "./dicts"
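# read_files(), parse_log(), load_dict_list() and process() are elided from
# this snippet; from the calls below, they locate the capture files (PMKID and
# 4-way handshake), read the crack log, enumerate dictionaries, and drive
# hashcat with the given hash mode ("16800" = PMKID, "2500" = WPA/WPA2 EAPOL).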
files = read_files()
logs = parse_log()
dicts = load_dict_list()
print(dicts)
print(files)
print(logs)
pmkid = files[0]
hs4 = files[1]
process(pmkid, "16800", logs, dicts)
process(hs4, "2500", logs, dicts)
| 27.357143 | 170 | 0.518538 |
914c1ed0296d91a573e8d232f2ea7fec8dafd2e3
| 46,013 |
py
|
Python
|
editortools/player.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 237 |
2018-02-04T19:13:31.000Z
|
2022-03-26T03:06:07.000Z
|
editortools/player.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 551 |
2015-01-01T02:36:53.000Z
|
2018-02-01T00:03:12.000Z
|
editortools/player.py
|
bennettdc/MCEdit-Unified
|
90abfb170c65b877ac67193e717fa3a3ded635dd
|
[
"0BSD"
] | 97 |
2015-01-02T01:31:12.000Z
|
2018-01-22T05:37:47.000Z
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
#-# Modified by D.C.-G. for translation purposes
from OpenGL import GL
import numpy
import os
from albow import TableView, TableColumn, Label, Button, Column, CheckBox, AttrRef, Row, ask, alert, input_text_buttons, TabPanel
from albow.table_view import TableRowView
from albow.translate import _
from config import config
from editortools.editortool import EditorTool
from editortools.tooloptions import ToolOptions
from glbackground import Panel
from glutils import DisplayList
from mceutils import loadPNGTexture, alertException, drawTerrainCuttingWire, drawCube
from operation import Operation
import pymclevel
from pymclevel.box import BoundingBox, FloatBox
from pymclevel import nbt
import logging
from player_cache import PlayerCache, ThreadRS
from nbtexplorer import loadFile, saveFile, NBTExplorerToolPanel
import pygame
log = logging.getLogger(__name__)
def okayAt63(level, pos):
"""blocks 63 or 64 must be occupied"""
# return level.blockAt(pos[0], 63, pos[2]) != 0 or level.blockAt(pos[0], 64, pos[2]) != 0
return True
def okayAboveSpawn(level, pos):
"""3 blocks above spawn must be open"""
return not any([level.blockAt(pos[0], pos[1] + i, pos[2]) for i in xrange(1, 4)])
def positionValid(level, pos):
try:
return okayAt63(level, pos) and okayAboveSpawn(level, pos)
except EnvironmentError:
return False
panel = None
markerLevel = None
#@property
#def statusText(self):
# if not self.panel:
# return ""
# player = self.panel.selectedPlayer
# if player == "Player":
# return "Click to move the player"
#
# return _("Click to move the player \"{0}\"").format(player)
| 39.632214 | 219 | 0.578836 |
914cca42f7c78c12fb45153e185381ce97dc5240
| 5,200 |
py
|
Python
|
seismic/checkpointing/checkpoint.py
|
slimgroup/Devito-Examples
|
449e1286a18ebc4172069372ba2bf3cd2ec99a2f
|
[
"MIT"
] | 7 |
2020-08-19T18:23:08.000Z
|
2022-02-18T19:19:24.000Z
|
seismic/checkpointing/checkpoint.py
|
slimgroup/Devito-Examples
|
449e1286a18ebc4172069372ba2bf3cd2ec99a2f
|
[
"MIT"
] | null | null | null |
seismic/checkpointing/checkpoint.py
|
slimgroup/Devito-Examples
|
449e1286a18ebc4172069372ba2bf3cd2ec99a2f
|
[
"MIT"
] | 3 |
2020-12-01T22:17:09.000Z
|
2021-05-21T11:29:07.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2016, Imperial College, London
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation the rights to use, copy,
# modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from pyrevolve import Checkpoint, Operator
from devito import TimeFunction
from devito.tools import flatten
def get_symbol_data(symbol, timestep):
timestep += symbol.time_order - 1
ptrs = []
for i in range(symbol.time_order):
# Use `._data`, instead of `.data`, as `.data` is a view of the DOMAIN
# data region which is non-contiguous in memory. The performance hit from
# dealing with non-contiguous memory is so big (introduces >1 copy), it's
        # better to checkpoint unnecessary stuff to get a contiguous chunk of memory.
ptr = symbol._data[timestep - i, :, :]
ptrs.append(ptr)
return ptrs
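# The DevitoCheckpoint class that consumes get_symbol_data is cut off in
# this excerpt. A hedged sketch of how the returned slices might be copied
# into flat checkpoint storage (names and layout are assumptions):
import numpy as np

def save_symbol_data(symbols, timestep, storage):
    """Copy each symbol's recent time slices into a flat numpy buffer."""
    offset = 0
    for symbol in symbols:
        for ptr in get_symbol_data(symbol, timestep):
            storage[offset:offset + ptr.size] = ptr.ravel()
            offset += ptr.size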
| 44.067797 | 129 | 0.68 |
914cfd2421dd20bdadd6d7150cecf300e7699605
| 13,463 |
py
|
Python
|
lbrynet/file_manager/EncryptedFileManager.py
|
shyba/lbry
|
ab3278c50a8b7b5a8e9486a1c52be3d5e0c18297
|
[
"MIT"
] | 1 |
2018-12-08T04:42:11.000Z
|
2018-12-08T04:42:11.000Z
|
lbrynet/file_manager/EncryptedFileManager.py
|
mrlucky9/lbry
|
bf6bc02828ed55e98a3002f487041acbd7841883
|
[
"MIT"
] | null | null | null |
lbrynet/file_manager/EncryptedFileManager.py
|
mrlucky9/lbry
|
bf6bc02828ed55e98a3002f487041acbd7841883
|
[
"MIT"
] | null | null | null |
"""
Keep track of which LBRY Files are downloading and store their LBRY File specific metadata
"""
import logging
import os
from twisted.enterprise import adbapi
from twisted.internet import defer, task, reactor
from twisted.python.failure import Failure
from lbrynet.reflector.reupload import reflect_stream
from lbrynet.core.PaymentRateManager import NegotiatedPaymentRateManager
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloader
from lbrynet.file_manager.EncryptedFileDownloader import ManagedEncryptedFileDownloaderFactory
from lbrynet.lbry_file.StreamDescriptor import EncryptedFileStreamType
from lbrynet.cryptstream.client.CryptStreamDownloader import AlreadyStoppedError
from lbrynet.cryptstream.client.CryptStreamDownloader import CurrentlyStoppingError
from lbrynet.core.sqlite_helpers import rerun_if_locked
from lbrynet import conf
log = logging.getLogger(__name__)
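# Note: the EncryptedFileManager class definition is cut off in this
# excerpt; the `self` methods below are its methods in the full module.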
def toggle_lbry_file_running(self, lbry_file):
"""Toggle whether a stream reader is currently running"""
for l in self.lbry_files:
if l == lbry_file:
return l.toggle_running()
return defer.fail(Failure(ValueError("Could not find that LBRY file")))
def _reflect_lbry_files(self):
for lbry_file in self.lbry_files:
yield reflect_stream(lbry_file)
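# Note: in Twisted code a generator like _reflect_lbry_files is typically
# wrapped with @defer.inlineCallbacks so each yielded Deferred from
# reflect_stream is awaited in turn; the decorator does not appear in this
# excerpt (assumption, not confirmed by the dump).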
def get_count_for_stream_hash(self, stream_hash):
return self._get_count_for_stream_hash(stream_hash)
######### database calls #########
| 40.18806 | 100 | 0.672733 |
914dad243b4f6fd43e52b214d9db3b5771ad2444
| 623 |
py
|
Python
|
Perforce/AppUtils.py
|
TomMinor/MayaPerforce
|
52182c7e5c3e91e41973d0c2abbda8880e809e49
|
[
"MIT"
] | 13 |
2017-03-31T21:52:19.000Z
|
2021-09-06T23:15:30.000Z
|
Perforce/AppUtils.py
|
TomMinor/MayaPerforce
|
52182c7e5c3e91e41973d0c2abbda8880e809e49
|
[
"MIT"
] | 3 |
2017-05-08T02:27:43.000Z
|
2017-05-10T03:20:11.000Z
|
Perforce/AppUtils.py
|
TomMinor/MayaPerforce
|
52182c7e5c3e91e41973d0c2abbda8880e809e49
|
[
"MIT"
] | 3 |
2017-05-05T14:03:03.000Z
|
2020-05-25T10:25:04.000Z
|
import os
import sys
import re
import logging
p4_logger = logging.getLogger("Perforce")
# Import app-specific utilities; Maya opens scenes differently than Nuke, etc.
# Are we in maya or nuke?
if re.match( "maya", os.path.basename( sys.executable ), re.I ):
p4_logger.info("Configuring for Maya")
from MayaUtils import *
elif re.match( "nuke", os.path.basename( sys.executable ), re.I ):
p4_logger.info("Configuring for Nuke")
from NukeUtils import *
else:
p4_logger.warning("Couldn't find app configuration")
raise ImportError("No supported applications found that this plugin can interface with")
| 32.789474 | 90 | 0.738363 |
914ea6fbc1fedc5c88691906b2f1c1f56a6d040c
| 5,907 |
py
|
Python
|
fhir/immunizations_demo/models/trainer/model.py
|
kourtneyshort/healthcare
|
1d1e2375304ac99f43a8b6aee7374fcdf641eb6f
|
[
"Apache-2.0"
] | null | null | null |
fhir/immunizations_demo/models/trainer/model.py
|
kourtneyshort/healthcare
|
1d1e2375304ac99f43a8b6aee7374fcdf641eb6f
|
[
"Apache-2.0"
] | 22 |
2019-12-16T22:18:37.000Z
|
2022-03-12T00:04:43.000Z
|
fhir/immunizations_demo/models/trainer/model.py
|
kourtneyshort/healthcare
|
1d1e2375304ac99f43a8b6aee7374fcdf641eb6f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""A simple logistics regression model for immunization prediction.
The following features are used in this model:
1. age of the patient
2. gender of the patient
3. country the patient is visiting
4. expected duration of stay
5. disease
We are predicting the possibility of the patient getting a disease.
Note that this model is part of an end-to-end demo which shows how
to leverage the Google Cloud Healthcare APIs (FHIR APIs specifically)
to finish data analysis and machine learning tasks. This problem
itself is not a natural machine learning task.
"""
import tensorflow as tf
from functools import reduce
# Input data specific flags.
tf.flags.DEFINE_string("training_data", default=None,
help="Path to training data. This should be a GCS path.")
tf.flags.DEFINE_string("eval_data", default=None,
help="Path to evaluation data. This should be a GCS path.")
# Model specific flags. See more details here:
# https://www.tensorflow.org/api_docs/python/tf/estimator/LinearClassifier
tf.flags.DEFINE_string("model_dir", default=None,
help="Estimator model_dir.")
tf.flags.DEFINE_string("export_model_dir", default=None,
help="Folder to export trained model.")
tf.flags.DEFINE_integer("batch_size", default=96,
help="Mini-batch size for the training.")
tf.flags.DEFINE_integer("training_steps", default=1000,
help="Total number of training steps.")
tf.flags.DEFINE_integer("eval_steps", default=100,
help="Total number of evaluation steps.")
tf.flags.DEFINE_integer("n_classes", default=2,
help="Number of categories to classify to.")
# More advanced flags that controls the behavior of FTRL optimizer.
# See more details here:
# https://www.tensorflow.org/api_docs/python/tf/train/FtrlOptimizer
tf.flags.DEFINE_float("learning_rate", default=0.01,
help="Learning rate")
tf.flags.DEFINE_float("l1_regularization_strength", default=0.005,
help="L1 regularization strength for FTRL optimizer.")
tf.flags.DEFINE_float("l2_regularization_strength", default=0.001,
help="L2 regularization strength for FTRL optimizer.")
FLAGS = tf.flags.FLAGS
# Feature and label keys.
FEATURE_KEYS = ['age', 'gender', 'country', 'duration', 'disease']
LABEL_KEYS = ['risk']
DS_BUFFER_SIZE = 50000
def build_input_fn(filename):
"""Builds the input funciton for training/evaluation.
Args:
filename (string): The path of the file that contains features and
labels. This can be a Google Cloud Storage path (e.g. gs://...).
"""
def input_fn():
"""Input function to be used by the classifier."""
def parse(serialized_example):
"""Parses a single tensorflow example."""
data = tf.parse_single_example(serialized_example,
features=reduce(parse_feature, FEATURE_KEYS + LABEL_KEYS, {}))
features = [tf.convert_to_tensor(tf.cast(data[key], tf.int32))
for key in FEATURE_KEYS]
labels = [tf.convert_to_tensor(tf.cast(data[key], tf.int32))
for key in LABEL_KEYS]
return features, labels
dataset = tf.data.TFRecordDataset(filename, buffer_size=DS_BUFFER_SIZE)
dataset = dataset.map(parse).cache().repeat()
dataset = dataset.batch(FLAGS.batch_size)
features, labels = dataset.make_one_shot_iterator().get_next()
# Slice features into a dictionary which is expected by the classifier.
features = tf.transpose(features)
def map_feature(dict, idx):
"""Maps individual features into a dictionary."""
dict[FEATURE_KEYS[idx]] = tf.transpose(
tf.nn.embedding_lookup(features, [idx]))
return dict
return reduce(map_feature, list(range(len(FEATURE_KEYS))), {}), labels
return input_fn
def build_serving_input_receiver_fn():
"""Builds a serving_input_receiver_fn which takes JSON as input."""
return serving_input_receiver_fn
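# serving_input_receiver_fn and main() are referenced below but their bodies
# are missing from this excerpt. Minimal hedged sketches of what they
# plausibly look like (assumptions, not the original implementation):
def serving_input_receiver_fn():
  # One int placeholder per feature, matching the parsed feature shapes.
  receiver_tensors = {
      key: tf.placeholder(dtype=tf.int32, shape=[None, 1], name=key)
      for key in FEATURE_KEYS
  }
  return tf.estimator.export.ServingInputReceiver(
      features=receiver_tensors, receiver_tensors=receiver_tensors)

def main(_):
  # Wire the flags above into a simple TF1 LinearClassifier with FTRL.
  feature_columns = [
      tf.feature_column.numeric_column(key) for key in FEATURE_KEYS]
  classifier = tf.estimator.LinearClassifier(
      feature_columns=feature_columns,
      model_dir=FLAGS.model_dir,
      n_classes=FLAGS.n_classes,
      optimizer=tf.train.FtrlOptimizer(
          learning_rate=FLAGS.learning_rate,
          l1_regularization_strength=FLAGS.l1_regularization_strength,
          l2_regularization_strength=FLAGS.l2_regularization_strength))
  classifier.train(input_fn=build_input_fn(FLAGS.training_data),
                   steps=FLAGS.training_steps)
  classifier.evaluate(input_fn=build_input_fn(FLAGS.eval_data),
                      steps=FLAGS.eval_steps)
  classifier.export_savedmodel(FLAGS.export_model_dir,
                               build_serving_input_receiver_fn())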
if __name__ == '__main__':
# Set logging level to INFO.
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run()
| 34.54386 | 78 | 0.744033 |
914f974905119aa6df33b733c4b0cd0e4954c272
| 15,147 |
py
|
Python
|
heliosburn/django/hbproject/webui/models.py
|
thecodeteam/heliosburn
|
513f6335c9788948d82e5c9285d7869f3ff4cc10
|
[
"MIT"
] | null | null | null |
heliosburn/django/hbproject/webui/models.py
|
thecodeteam/heliosburn
|
513f6335c9788948d82e5c9285d7869f3ff4cc10
|
[
"MIT"
] | null | null | null |
heliosburn/django/hbproject/webui/models.py
|
thecodeteam/heliosburn
|
513f6335c9788948d82e5c9285d7869f3ff4cc10
|
[
"MIT"
] | 1 |
2020-09-17T18:19:05.000Z
|
2020-09-17T18:19:05.000Z
|
import json
import re
from django.conf import settings
import requests
from webui.exceptions import BadRequestException, UnauthorizedException, ServerErrorException, RedirectException, \
UnexpectedException, LocationHeaderNotFoundException, NotFoundException
| 39.139535 | 115 | 0.642239 |
91504bbaea6d8835c5bee931052df81b48164c98
| 8,305 |
py
|
Python
|
src/ychaos/core/verification/controller.py
|
sushilkar/ychaos
|
6801390f0faf553789e3384440a72a0705310738
|
[
"Apache-2.0"
] | null | null | null |
src/ychaos/core/verification/controller.py
|
sushilkar/ychaos
|
6801390f0faf553789e3384440a72a0705310738
|
[
"Apache-2.0"
] | null | null | null |
src/ychaos/core/verification/controller.py
|
sushilkar/ychaos
|
6801390f0faf553789e3384440a72a0705310738
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021, Yahoo
# Licensed under the terms of the Apache 2.0 license. See the LICENSE file in the project root for terms
import time
from typing import Dict, List, Optional, Type
from pydantic import validate_arguments
from ...app_logger import AppLogger
from ...testplan import SystemState
from ...testplan.schema import TestPlan
from ...testplan.verification import VerificationConfig, VerificationType
from ...utils.hooks import EventHook
from ...utils.yaml import Dumper
from .data import VerificationData, VerificationStateData
from .plugins.BaseVerificationPlugin import BaseVerificationPlugin
from .plugins.HTTPRequestVerificationPlugin import (
HTTPRequestVerificationPlugin,
)
from .plugins.PythonModuleVerificationPlugin import (
PythonModuleVerificationPlugin,
)
from .plugins.SDv4VerificationPlugin import SDv4VerificationPlugin
# Enum value to corresponding Plugin Map
VERIFICATION_PLUGIN_MAP: Dict[str, Type[BaseVerificationPlugin]] = {
"python_module": PythonModuleVerificationPlugin,
"http_request": HTTPRequestVerificationPlugin,
"sdv4": SDv4VerificationPlugin,
}
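# Illustrative usage (not from the original file): the controller can
# resolve a plugin class for a given verification config via this map, e.g.
#
#   plugin_cls = VERIFICATION_PLUGIN_MAP[verification_config.type.value]
#   plugin = plugin_cls(verification_config, state_data)
#
# assuming `verification_config.type` is a VerificationType enum member
# whose value is one of the keys above.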
| 37.40991 | 125 | 0.656352 |
9150df9e1d1933653f868e837eeb00ed20e37c8b
| 277 |
py
|
Python
|
tests/test_vimeodl.py
|
binary-signal/vimeo-channel-downloader
|
7c2ded9d07b2b698f4e52558ba7dc327c2827b6c
|
[
"BSD-3-Clause"
] | 6 |
2019-06-05T17:06:17.000Z
|
2020-02-16T15:11:11.000Z
|
tests/test_vimeodl.py
|
binary-signal/vimeo-channel-downloader
|
7c2ded9d07b2b698f4e52558ba7dc327c2827b6c
|
[
"BSD-3-Clause"
] | 1 |
2020-12-03T16:05:21.000Z
|
2020-12-03T16:05:21.000Z
|
tests/test_vimeodl.py
|
binary-signal/vimeo-channel-downloader
|
7c2ded9d07b2b698f4e52558ba7dc327c2827b6c
|
[
"BSD-3-Clause"
] | 3 |
2018-07-06T10:04:08.000Z
|
2019-06-05T17:07:25.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from vimeodl import __version__
from vimeodl.vimeo import VimeoLinkExtractor, VimeoDownloader
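# The test functions are cut off in this excerpt; a minimal hedged sketch of
# what they plausibly cover (assumed, not the original):
def test_version_is_set():
    assert __version__

def test_extractor_importable():
    assert VimeoLinkExtractor and VimeoDownloader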
| 21.307692 | 61 | 0.722022 |
915138c1e205dea19655e55c824d89b847b800d5
| 6,160 |
py
|
Python
|
labgraph/graphs/node_test_harness.py
|
Yunusbcr/labgraph
|
a00ae7098b7b0e0eda8ce2e7e62dae86854616fb
|
[
"MIT"
] | 124 |
2021-07-14T21:25:59.000Z
|
2022-03-08T20:40:16.000Z
|
labgraph/graphs/node_test_harness.py
|
Yunusbcr/labgraph
|
a00ae7098b7b0e0eda8ce2e7e62dae86854616fb
|
[
"MIT"
] | 46 |
2021-07-16T18:41:11.000Z
|
2022-03-31T20:53:00.000Z
|
labgraph/graphs/node_test_harness.py
|
Yunusbcr/labgraph
|
a00ae7098b7b0e0eda8ce2e7e62dae86854616fb
|
[
"MIT"
] | 22 |
2021-07-16T18:34:56.000Z
|
2022-03-31T15:12:06.000Z
|
#!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import asyncio
import functools
import inspect
from contextlib import contextmanager
from typing import (
Any,
AsyncIterable,
Awaitable,
Callable,
Generic,
Iterator,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
)
from ..messages.message import Message
from ..util.testing import get_event_loop
from .config import Config
from .method import AsyncPublisher
from .node import Node
from .state import State
from .topic import Topic
N = TypeVar("N", bound=Node) # Node type
T = TypeVar("T", bound=Tuple[Topic, Message]) # Type yielded by async functions
def run_with_harness(node_type, fn, config=None, state=None, max_num_results=None):
"""
Runs an async function on a new node of the provided type using `NodeTestHarness`.
Args:
node_type: The type of node to create.
fn:
The async function to run. An instance of a node typed `node_type` will be
provided to the function as an argument.
config: The configuration to set on the node, if provided.
state: The state to set on the node, if provided.
max_num_results:
If `fn` is an async generator, the maximum number of results it will generate.
If this is `None`, then the generator can produce an unbounded number of
results.
"""
# Check whether the max_num_results argument was improperly provided
_check_max_num_results_arg(run_with_harness.__name__, fn, max_num_results)
test_harness = NodeTestHarness(node_type=node_type)
with test_harness.get_node(config=config, state=state) as node:
return run_async(fn, args=[node], max_num_results=max_num_results)
def run_async(fn, args=None, kwargs=None, max_num_results=None):
"""
Runs an async function to completion. Uses the current thread's event loop. Blocks
until the async function has finished executing. Forwards all arguments after `fn`
to the async function.
Args:
fn: The async function to run.
args: Positional arguments to forward to the function.
kwargs: Keyword arguments to forward to the function.
max_num_results:
If `fn` is an async generator, the maximum number of results it will generate.
If this is `None`, then the generator can produce an unbounded number of
results.
"""
# Check whether the max_num_results argument was improperly provided
_check_max_num_results_arg(run_async.__name__, fn, max_num_results)
# Unwrap functools.partial so we can check whether it is async
if isinstance(fn, functools.partial):
test_fn = fn.func
else:
test_fn = fn
if inspect.isasyncgenfunction(test_fn):
return get_event_loop().run_until_complete(
_async_generator_to_list(
fn=fn,
args=args or [],
kwargs=kwargs or {},
max_num_results=max_num_results,
)
)
elif asyncio.iscoroutinefunction(test_fn):
return get_event_loop().run_until_complete(fn(*(args or []), **(kwargs or {})))
else:
raise TypeError(f"{run_async.__name__}: function '{fn}' is not async")
def _check_max_num_results_arg(
called_fn_name: str,
fn: Union[Callable[..., Awaitable[Any]], Callable[..., AsyncIterable[Any]]],
max_num_results: Optional[int] = None,
) -> None:
if not inspect.isasyncgenfunction(fn) and max_num_results is not None:
raise TypeError(
f"{called_fn_name}: function '{fn}' is not an async generator but "
"max_num_results was provided"
)
| 30.49505 | 88 | 0.65276 |
9151eafe84027e81a61010f1c158d9786b978a93
| 837 |
py
|
Python
|
pygamelearning/lrud.py
|
edward70/2021Computing
|
df8fb818480a6e23f2eac736744294871ec0e38c
|
[
"MIT"
] | null | null | null |
pygamelearning/lrud.py
|
edward70/2021Computing
|
df8fb818480a6e23f2eac736744294871ec0e38c
|
[
"MIT"
] | null | null | null |
pygamelearning/lrud.py
|
edward70/2021Computing
|
df8fb818480a6e23f2eac736744294871ec0e38c
|
[
"MIT"
] | null | null | null |
import pygame
import sys
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode([500, 500])
gameOn = True
x1 = 0
y1 = 100
x2 = 100
y2 = 0
while gameOn:
screen.fill([255,255,255])
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
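    # Bounce: reverse the circle's horizontal direction and the square's
    # vertical direction at the edges of the 500x500 window.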
if x1 == 500:
moveRight = False
elif x1 == 0:
moveRight = True
if y2 == 500:
moveDown = False
elif y2 == 0:
moveDown = True
if moveRight:
x1 = x1+1
else:
x1 = x1-1
if moveDown:
y2 = y2+1
else:
y2 = y2-1
pygame.draw.circle(screen, [0,0,0], [x1,y1], 10)
pygame.draw.rect(screen, [0,0,0], [x2,y2,30,30])
clock.tick(100)
pygame.display.flip()
pygame.quit()
| 17.808511 | 52 | 0.520908 |
91522a760e718a02b548df8a5987a17cb9ed54b7
| 3,198 |
py
|
Python
|
pytorch/xor/training_a_perceptron.py
|
e93fem/PyTorchNLPBook
|
c9ea9e0b3d1b8bba6a983b425c6c03dd79d3d6b0
|
[
"Apache-2.0"
] | null | null | null |
pytorch/xor/training_a_perceptron.py
|
e93fem/PyTorchNLPBook
|
c9ea9e0b3d1b8bba6a983b425c6c03dd79d3d6b0
|
[
"Apache-2.0"
] | null | null | null |
pytorch/xor/training_a_perceptron.py
|
e93fem/PyTorchNLPBook
|
c9ea9e0b3d1b8bba6a983b425c6c03dd79d3d6b0
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import torch
import matplotlib.pyplot as plt
from torch import optim, nn
from pytorch.xor.multilayer_perceptron import MultilayerPerceptron
from pytorch.xor.utils import LABELS, get_toy_data, visualize_results, plot_intermediate_representations
input_size = 2
output_size = len(set(LABELS))
num_hidden_layers = 0
hidden_size = 2 # isn't ever used but we still set it
seed = 24
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
mlp1 = MultilayerPerceptron(input_size=input_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
output_size=output_size)
print(mlp1)
batch_size = 1000
x_data_static, y_truth_static = get_toy_data(batch_size)
fig, ax = plt.subplots(1, 1, figsize=(10,5))
visualize_results(mlp1, x_data_static, y_truth_static,
ax=ax, title='Initial Perceptron State', levels=[0.5])
plt.axis('off')
plt.savefig('images/perceptron_initial.png')
plt.show()
losses = []
batch_size = 10000
n_batches = 10
max_epochs = 10
loss_change = 1.0
last_loss = 10.0
change_threshold = 1e-3
epoch = 0
all_imagefiles = []
lr = 0.01
optimizer = optim.Adam(params=mlp1.parameters(), lr=lr)
cross_ent_loss = nn.CrossEntropyLoss()
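# early_termination is not defined in this excerpt; a minimal hedged sketch
# of the stopping criterion it plausibly implements (assumed):
def early_termination(loss_change, change_threshold, epoch, max_epochs):
    # Stop when the loss has plateaued or the epoch budget is exhausted.
    return loss_change < change_threshold or epoch >= max_epochs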
while not early_termination(loss_change, change_threshold, epoch, max_epochs):
for _ in range(n_batches):
# step 0: fetch the data
x_data, y_target = get_toy_data(batch_size)
# step 1: zero the gradients
mlp1.zero_grad()
# step 2: run the forward pass
y_pred = mlp1(x_data).squeeze()
# step 3: compute the loss
loss = cross_ent_loss(y_pred, y_target.long())
# step 4: compute the backward pass
loss.backward()
# step 5: have the optimizer take an optimization step
optimizer.step()
    # auxiliary: bookkeeping
loss_value = loss.item()
losses.append(loss_value)
loss_change = abs(last_loss - loss_value)
last_loss = loss_value
print("epoch: {}: loss_value: {}".format(epoch, loss_value))
fig, ax = plt.subplots(1, 1, figsize=(10, 5))
visualize_results(mlp1, x_data_static, y_truth_static, ax=ax, epoch=epoch,
title=f"{loss_value:0.2f}; {loss_change:0.4f}")
plt.axis('off')
epoch += 1
all_imagefiles.append(f'images/perceptron_epoch{epoch}_toylearning.png')
plt.savefig(all_imagefiles[-1])
_, ax = plt.subplots(1,1,figsize=(10,5))
visualize_results(mlp1, x_data_static, y_truth_static, epoch=None, levels=[0.5], ax=ax)
plt.axis('off')
plt.savefig('images/perceptron_final.png')
plot_intermediate_representations(mlp1,
"The Perceptron's Input and Intermediate Representation",
figsize=(9, 3))
plt.savefig("images/perceptron_intermediate.png")
plt.savefig("images/figure_4_5.pdf")
| 30.169811 | 104 | 0.688555 |
91539993c3d566be3d6ad8bdfd6ab2f85574f003
| 8,157 |
py
|
Python
|
mysite/api/v0/tests.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 3 |
2015-11-20T07:33:28.000Z
|
2017-01-15T23:33:50.000Z
|
mysite/api/v0/tests.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 28 |
2015-07-14T11:33:24.000Z
|
2017-11-17T15:21:22.000Z
|
mysite/api/v0/tests.py
|
raccoongang/socraticqs2
|
06201005136ee139846f857dbb2f518736e441de
|
[
"Apache-2.0"
] | 4 |
2015-04-29T09:04:59.000Z
|
2017-07-19T14:11:16.000Z
|
import json
import mock
from django.core.urlresolvers import reverse
from pymongo.errors import ServerSelectionTimeoutError
from analytics.models import CourseReport
from core.common.mongo import c_onboarding_status, _conn
from core.common import onboarding
from ct.models import UnitLesson, StudentError
from ctms.tests import MyTestCase
HEALTH_URL = reverse('api:v0:health-check')
def test_health_non_ok(client, db, mocker):
"""
    Ping and Stats Mongo commands return non-ok results.
"""
do_health = mocker.patch('api.v0.views.do_health')
do_health.return_value = {}, {}
result = client.get(HEALTH_URL)
assert result.status_code == 503
def test_health_exception(client, db, mocker):
"""
    Mongo query raises an exception.
"""
do_health = mocker.patch('api.v0.views.do_health')
do_health.side_effect = ServerSelectionTimeoutError()
result = client.get(HEALTH_URL)
assert result.status_code == 503
| 32.369048 | 113 | 0.674635 |
9153c783ea6530b33a82747aab7d0a7d6aae69be
| 8,934 |
py
|
Python
|
signbank/settings/base.py
|
anthonymark33/Global-signbank
|
ae61984a24f1cc0801d4621c81b882154ce99098
|
[
"BSD-3-Clause"
] | null | null | null |
signbank/settings/base.py
|
anthonymark33/Global-signbank
|
ae61984a24f1cc0801d4621c81b882154ce99098
|
[
"BSD-3-Clause"
] | 2 |
2021-06-10T23:11:53.000Z
|
2021-12-13T20:44:56.000Z
|
signbank/settings/base.py
|
anthonymark33/Global-signbank
|
ae61984a24f1cc0801d4621c81b882154ce99098
|
[
"BSD-3-Clause"
] | null | null | null |
# Django settings for signbank project.
import os
from signbank.settings.server_specific import *
from datetime import datetime
DEBUG = True
PROJECT_DIR = os.path.dirname(BASE_DIR)
MANAGERS = ADMINS
TIME_ZONE = 'Europe/Amsterdam'
LOCALE_PATHS = [BASE_DIR+'conf/locale']
# in the database, SITE_ID 1 is example.com
SITE_ID = 2
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = WRITABLE_FOLDER
MEDIA_URL = PREFIX_URL+'/media/'
MEDIA_MOBILE_URL = MEDIA_URL
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = PREFIX_URL
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = PREFIX_URL+'/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, "media"),
)
# STATICFILES_STORAGE = ( os.path.join(PROJECT_DIR, "static"), )
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^g=q21r_nnmbz49d!vs*2gvpll-y9b@&t3k2r3c$*u&2la5!%s'
MIDDLEWARE_CLASSES = (
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'signbank.pages.middleware.PageFallbackMiddleware',
# 'django_mobile.middleware.MobileDetectionMiddleware',
# 'django_mobile.middleware.SetFlavourMiddleware',
# 'debug_toolbar.middleware.DebugToolbarMiddleware',
'reversion.middleware.RevisionMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_DIR, 'templates/' + SIGNBANK_VERSION_CODE + '-templates'),
os.path.join(PROJECT_DIR, 'signbank/registration/templates/')],
'OPTIONS': {
'context_processors': [
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"signbank.context_processors.url",
"signbank.pages.context_processors.menu",
# "django_mobile.context_processors.flavour",
],
'loaders': [
# 'django_mobile.loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
},
},
]
# add the Email backend to allow logins using email as username
AUTHENTICATION_BACKENDS = (
"signbank.registration.EmailBackend",
"django.contrib.auth.backends.ModelBackend",
'guardian.backends.ObjectPermissionBackend',
)
AUTH_PROFILE_MODULE = 'dictionary.UserProfile'
INTERNAL_IPS = ('127.0.0.1','131.174.132.138')
ROOT_URLCONF = 'signbank.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'signbank.wsgi.application'
INSTALLED_APPS = (
'modeltranslation',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.admindocs',
'django.contrib.staticfiles',
'bootstrap3',
'django_summernote',
# 'django_select2',
# 'easy_select2',
'signbank.dictionary',
'signbank.feedback',
#'signbank.registration',
'signbank.pages',
'signbank.attachments',
'signbank.video',
'reversion',
#'django_mobile',
'tagging',
'guardian',
#'debug_toolbar'
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# turn on lots of logging or not
DO_LOGGING = False
LOG_FILENAME = "debug.log"
SOUTH_TESTS_MIGRATE = False
## Application settings for signbank
## Settings controlling page contents
# do we implement safe search for anonymous users?
# if True, any gloss that is tagged lexis:crude will be removed from
# search results for users who are not logged in
ANON_SAFE_SEARCH = False
# do we show the tag based search for anonymous users?
ANON_TAG_SEARCH = False
# do we display the previous/next links to signs, requires gloss.sn to be used consistently
SIGN_NAVIGATION = False
# which definition fields do we show and in what order?
DEFINITION_FIELDS = ['general', 'noun', 'verb', 'interact', 'deictic', 'modifier', 'question', 'augment', 'note']
HANDSHAPE_RESULT_FIELDS = ['machine_value', 'english_name', 'dutch_name', 'chinese_name',
'hsFingSel', 'hsFingConf', 'hsFingSel2', 'hsFingConf2', 'hsFingUnsel', 'hsSpread', 'hsAperture']
# location and URL for uploaded files
UPLOAD_ROOT = MEDIA_ROOT + "upload/"
UPLOAD_URL = MEDIA_URL + "upload/"
# Location for comment videos relative to MEDIA_ROOT
COMMENT_VIDEO_LOCATION = "comments"
# Location for videos associated with pages
PAGES_VIDEO_LOCATION = 'pages'
# location for upload of videos relative to MEDIA_ROOT
# videos are stored here prior to copying over to the main
# storage location
VIDEO_UPLOAD_LOCATION = "upload"
# path to store uploaded attachments relative to MEDIA_ROOT
ATTACHMENT_LOCATION = 'attachments'
# which fields from the Gloss model should be included in the quick update form on the sign view
QUICK_UPDATE_GLOSS_FIELDS = ['signlanguage', 'dialect']
# should we always require a login for viewing dictionary content
ALWAYS_REQUIRE_LOGIN = True
# do we allow people to register for the site
ALLOW_REGISTRATION = True
ACCOUNT_ACTIVATION_DAYS = 7
# show the number signs page or an under construction page?
SHOW_NUMBERSIGNS = True
LOGIN_URL = PREFIX_URL+'/accounts/login/'
LOGIN_REDIRECT_URL = PREFIX_URL+'/signs/recently_added/'
# location of ffmpeg, used to convert uploaded videos
# FFMPEG_PROGRAM = "/Applications/ffmpegX.app/Contents/Resources/ffmpeg"
FFMPEG_TIMEOUT = 60
FFMPEG_OPTIONS = ["-vcodec", "h264", "-an"]
# defines the aspect ratio for videos
VIDEO_ASPECT_RATIO = 3.0/4.0
# settings for django-tagging
FORCE_LOWERCASE_TAGS = False
PRIMARY_CSS = "css/"+SIGNBANK_VERSION_CODE+"/main.css"
import mimetypes
mimetypes.add_type("video/mp4", ".mov", True)
# a list of tags we're allowed to use
XALLOWED_TAGS = [ '',
'workflow:needs video',
'workflow:redo video',
'workflow:problematic',
'corpus:attested',
'lexis:doubtlex',
'phonology:alternating',
'phonology:dominant hand only',
'phonology:double handed',
'phonology:forearm rotation',
'phonology:handshape change',
'phonology:onehand',
'phonology:parallel',
'phonology:symmetrical',
'phonology:two handed',
]
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
EARLIEST_GLOSS_CREATION_DATE = datetime(2015,1,1)
SUPPORTED_CITATION_IMAGE_EXTENSIONS = ['.jpg','.jpeg','.png']
MAXIMUM_UPLOAD_SIZE = 5000000
MINIMUM_OVERLAP_BETWEEN_SIGNING_HANDS_IN_CNGT = 40
DISABLE_MOVING_THUMBNAILS_ABOVE_NR_OF_GLOSSES = 200
DATA_UPLOAD_MAX_NUMBER_FIELDS = None
DATA_UPLOAD_MAX_MEMORY_SIZE = None
| 31.020833 | 123 | 0.694426 |
e66c3efb17fe57a58924ade4ac24258abd570c92
| 50,042 |
py
|
Python
|
ocs_ci/ocs/cluster.py
|
crombus/ocs-ci
|
20340365882bdd06ddb6cd65bbd7df0ba7e2c2d8
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/cluster.py
|
crombus/ocs-ci
|
20340365882bdd06ddb6cd65bbd7df0ba7e2c2d8
|
[
"MIT"
] | null | null | null |
ocs_ci/ocs/cluster.py
|
crombus/ocs-ci
|
20340365882bdd06ddb6cd65bbd7df0ba7e2c2d8
|
[
"MIT"
] | null | null | null |
"""
A module for all rook functionalities and abstractions.
This module has Rook-related classes and supporting functionality for working
with a Rook cluster. It assumes that an OCP cluster is already functional and
properly configured for interaction.
"""
import base64
import logging
import random
import re
import threading
import yaml
import time
import ocs_ci.ocs.resources.pod as pod
from ocs_ci.ocs.exceptions import UnexpectedBehaviour
from ocs_ci.ocs.resources import ocs, storage_cluster
import ocs_ci.ocs.constants as constant
from ocs_ci.ocs.resources.mcg import MCG
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
run_cmd,
convert_device_size,
get_trim_mean,
)
from ocs_ci.ocs.utils import get_pod_name_by_pattern
from ocs_ci.framework import config
from ocs_ci.ocs import ocp, constants, exceptions
from ocs_ci.ocs.exceptions import PoolNotFound
from ocs_ci.ocs.resources.pvc import get_all_pvc_objs
logger = logging.getLogger(__name__)
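# Note: this excerpt omits the class definitions (e.g. CephCluster) that the
# `self` methods below belong to, as well as some method bodies (e.g.
# CephHealthMonitor.__enter__/run); only part of the module survives the dump.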
def is_health_ok(self):
"""
Returns:
bool: True if "HEALTH_OK" else False
"""
self.cluster.reload()
return self.cluster.data["status"]["ceph"]["health"] == "HEALTH_OK"
def cluster_health_check(self, timeout=None):
"""
Check overall cluster health.
Relying on health reported by CephCluster.get()
Args:
timeout (int): in seconds. By default timeout value will be scaled
based on number of ceph pods in the cluster. This is just a
crude number. Its been observed that as the number of pods
increases it takes more time for cluster's HEALTH_OK.
Returns:
bool: True if "HEALTH_OK" else False
Raises:
CephHealthException: if cluster is not healthy
"""
# Scale timeout only if user hasn't passed any value
timeout = timeout or (10 * len(self.pods))
sample = TimeoutSampler(timeout=timeout, sleep=3, func=self.is_health_ok)
if not sample.wait_for_func_status(result=True):
raise exceptions.CephHealthException("Cluster health is NOT OK")
# This way of checking health of different cluster entities and
# raising only CephHealthException is not elegant.
# TODO: add an attribute in CephHealthException, called "reason"
# which should tell because of which exact cluster entity health
# is not ok ?
expected_mon_count = self.mon_count
expected_mds_count = self.mds_count
self.scan_cluster()
try:
self.mon_health_check(expected_mon_count)
except exceptions.MonCountException as e:
logger.error(e)
raise exceptions.CephHealthException("Cluster health is NOT OK")
try:
if not expected_mds_count:
pass
else:
self.mds_health_check(expected_mds_count)
except exceptions.MDSCountException as e:
logger.error(e)
raise exceptions.CephHealthException("Cluster health is NOT OK")
# TODO: OSD and MGR health check
logger.info("Cluster HEALTH_OK")
    # This scan is for reconciliation on *.count
# because during first scan in this function some of the
# pods may not be up and would have set count to lesser number
self.scan_cluster()
# Check Noobaa health
self.wait_for_noobaa_health_ok()
def noobaa_health_check(self):
"""
Check Noobaa health
"""
if not self.mcg_obj.status:
raise exceptions.NoobaaHealthException("Cluster health is NOT OK")
def wait_for_noobaa_health_ok(self, tries=60, delay=5):
"""
Wait for Noobaa health to be OK
"""
return retry(
exceptions.NoobaaHealthException, tries=tries, delay=delay, backoff=1
)(self.noobaa_health_check)()
def mon_change_count(self, new_count):
"""
Change mon count in the cluster
Args:
new_count(int): Absolute number of mons required
"""
self.cluster.reload()
self.cluster.data["spec"]["mon"]["count"] = new_count
logger.info(self.cluster.data)
self.cluster.apply(**self.cluster.data)
self.mon_count = new_count
self.cluster_health_check()
logger.info(f"Mon count changed to {new_count}")
self.cluster.reload()
def mon_health_check(self, count):
"""
Mon health check based on pod count
Args:
count (int): Expected number of mon pods
Raises:
MonCountException: if mon pod count doesn't match
"""
timeout = 10 * len(self.pods)
logger.info(f"Expected MONs = {count}")
try:
assert self.POD.wait_for_resource(
condition="Running",
selector=self.mon_selector,
resource_count=count,
timeout=timeout,
sleep=3,
)
# TODO: Workaround for BZ1748325:
actual_mons = pod.get_mon_pods()
actual_running_mons = list()
for mon in actual_mons:
if mon.ocp.get_resource_status(mon.name) == constant.STATUS_RUNNING:
actual_running_mons.append(mon)
actual = len(actual_running_mons)
# TODO: End of workaround for BZ1748325
assert count == actual, f"Expected {count}, Got {actual}"
except exceptions.TimeoutExpiredError as e:
logger.error(e)
raise exceptions.MonCountException(
f"Failed to achieve desired Mon count" f" {count}"
)
def mds_change_count(self, new_count):
"""
Change mds count in the cluster
Args:
new_count(int): Absolute number of active mdss required
"""
self.cephfs.data["spec"]["metadataServer"]["activeCount"] = new_count
self.cephfs.apply(**self.cephfs.data)
logger.info(f"MDS active count changed to {new_count}")
if self.cephfs.data["spec"]["metadataServer"]["activeStandby"]:
expected = new_count * 2
else:
expected = new_count
self.mds_count = expected
self.cluster_health_check()
self.cephfs.reload()
def mds_health_check(self, count):
"""
MDS health check based on pod count
Args:
count (int): number of pods expected
Raises:
MDACountException: if pod count doesn't match
"""
timeout = 10 * len(self.pods)
try:
assert self.POD.wait_for_resource(
condition="Running",
selector=self.mds_selector,
resource_count=count,
timeout=timeout,
sleep=3,
)
except AssertionError as e:
logger.error(e)
raise exceptions.MDSCountException(
f"Failed to achieve desired MDS count" f" {count}"
)
def get_admin_key(self):
"""
Returns:
adminkey (str): base64 encoded key
"""
return self.get_user_key("client.admin")
def set_noout(self):
"""
    Set noout flag for maintenance
"""
self.toolbox.exec_cmd_on_pod("ceph osd set noout")
def unset_noout(self):
"""
unset noout flag for peering
"""
self.toolbox.exec_cmd_on_pod("ceph osd unset noout")
def get_user_key(self, user):
"""
Args:
user (str): ceph username ex: client.user1
Returns:
key (str): base64 encoded user key
"""
out = self.toolbox.exec_cmd_on_pod(f"ceph auth get-key {user} --format json")
if "ENOENT" in out:
return False
key_base64 = base64.b64encode(out["key"].encode()).decode()
return key_base64
def create_user(self, username, caps):
"""
Create a ceph user in the cluster
Args:
username (str): ex client.user1
caps (str): ceph caps ex: mon 'allow r' osd 'allow rw'
Return:
return value of get_user_key()
"""
cmd = f"ceph auth add {username} {caps}"
# As of now ceph auth command gives output to stderr
# To be handled
out = self.toolbox.exec_cmd_on_pod(cmd)
logging.info(type(out))
return self.get_user_key(username)
def get_mons_from_cluster(self):
"""
Getting the list of mons from the cluster
Returns:
available_mon (list): Returns the mons from the cluster
"""
ret = self.DEP.get(
resource_name="", out_yaml_format=False, selector="app=rook-ceph-mon"
)
available_mon = re.findall(r"[\w-]+mon-+[\w-]", ret)
return available_mon
def remove_mon_from_cluster(self):
"""
Removing the mon pod from deployment
Returns:
remove_mon(bool): True if removal of mon is successful, False otherwise
"""
mons = self.get_mons_from_cluster()
after_delete_mon_count = len(mons) - 1
random_mon = random.choice(mons)
remove_mon = self.DEP.delete(resource_name=random_mon)
assert self.POD.wait_for_resource(
condition=constant.STATUS_RUNNING,
resource_count=after_delete_mon_count,
selector="app=rook-ceph-mon",
)
logging.info(f"Removed the mon {random_mon} from the cluster")
return remove_mon
def get_ceph_health(self, detail=False):
"""
Exec `ceph health` cmd on tools pod and return the status of the ceph
cluster.
Args:
detail (bool): If True the 'ceph health detail' is executed
Returns:
str: Output of the ceph health command.
"""
ceph_health_cmd = "ceph health"
if detail:
ceph_health_cmd = f"{ceph_health_cmd} detail"
return self.toolbox.exec_cmd_on_pod(
ceph_health_cmd,
out_yaml_format=False,
)
def get_ceph_status(self, format=None):
"""
Exec `ceph status` cmd on tools pod and return its output.
Args:
format (str) : Format of the output (e.g. json-pretty, json, plain)
Returns:
str: Output of the ceph status command.
"""
cmd = "ceph status"
if format:
cmd += f" -f {format}"
return self.toolbox.exec_cmd_on_pod(cmd, out_yaml_format=False)
def get_ceph_capacity(self):
"""
    The function gets the total amount of storage capacity of the ocs cluster.
    The calculation is <Num of OSDs> * <OSD size> / <replica number>;
    it does not take the currently used capacity into account.
Returns:
int : Total storage capacity in GiB (GiB is for development environment)
"""
storage_cluster_obj = storage_cluster.StorageCluster(
resource_name=config.ENV_DATA["storage_cluster_name"],
namespace=config.ENV_DATA["cluster_namespace"],
)
replica = int(
storage_cluster_obj.data["spec"]["storageDeviceSets"][0]["replica"]
)
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph df")
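    # "total_bytes" reported by `ceph df` already equals
    # <number of OSDs> * <OSD size>, so dividing by the replica count
    # yields the usable capacity.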
usable_capacity = (
int(ceph_status["stats"]["total_bytes"]) / replica / constant.GB
)
return usable_capacity
def get_ceph_cluster_iops(self):
"""
The function gets the IOPS from the ocs cluster
Returns:
Total IOPS in the cluster
"""
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph status")
read_ops = ceph_status["pgmap"]["read_op_per_sec"]
write_ops = ceph_status["pgmap"]["write_op_per_sec"]
cluster_iops = read_ops + write_ops
return cluster_iops
def get_iops_percentage(self, osd_size=2):
"""
The function calculates the IOPS percentage
of the cluster depending on number of osds in the cluster
Args:
osd_size (int): Size of 1 OSD in Ti
Returns:
IOPS percentage of the OCS cluster
"""
osd_count = count_cluster_osd()
iops_per_osd = osd_size * constants.IOPS_FOR_1TiB_OSD
iops_in_cluster = self.get_ceph_cluster_iops()
osd_iops_limit = iops_per_osd * osd_count
iops_percentage = (iops_in_cluster / osd_iops_limit) * 100
logging.info(f"The IOPS percentage of the cluster is {iops_percentage}%")
return iops_percentage
def get_cluster_throughput(self):
"""
Function to get the throughput of ocs cluster
Returns:
float: The write throughput of the cluster in MiB/s
"""
ceph_status = self.get_ceph_status()
for item in ceph_status.split("\n"):
if "client" in item:
throughput_data = item.strip("client: ").split(",")
throughput_data = throughput_data[:2:1]
# Converting all B/s and KiB/s to MiB/s
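            # Example (illustrative): for a status line such as
            #   client: 5.4 KiB/s rd, 2.0 MiB/s wr, 3 op/s rd, 7 op/s wr
            # throughput_data holds the two rate fields, and the loop below
            # sums them as roughly 0.005 + 2.0 = 2.005 MiB/s.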
throughput = 0
for val in throughput_data:
throughput += [
float(re.findall(r"\d+", val)[0]) * constants.TP_CONVERSION[key]
for key in constants.TP_CONVERSION.keys()
if key in val
][0]
logger.info(
f"The {val[-2:].upper()} throughput is {throughput} MiB/s"
)
return throughput
def get_throughput_percentage(self):
"""
Function to get throughput percentage of the ocs cluster
Returns:
Throughput percentage of the cluster
"""
throughput_of_cluster = self.get_cluster_throughput()
throughput_percentage = (
throughput_of_cluster / constants.THROUGHPUT_LIMIT_OSD
) * 100
logging.info(
f"The throughput percentage of the cluster is {throughput_percentage}%"
)
return throughput_percentage
def calc_trim_mean_throughput(self, samples=8):
"""
Calculate the cluster average throughput out of a few samples
Args:
samples (int): The number of samples to take
Returns:
float: The average cluster throughput
"""
throughput_vals = [self.get_cluster_throughput() for _ in range(samples)]
return round(get_trim_mean(throughput_vals), 3)
def get_rebalance_status(self):
"""
This function gets the rebalance status
Returns:
bool: True if rebalance is completed, False otherwise
"""
ceph_pod = pod.get_ceph_tools_pod()
ceph_status = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph status")
ceph_health = ceph_pod.exec_ceph_cmd(ceph_cmd="ceph health")
total_pg_count = ceph_status["pgmap"]["num_pgs"]
pg_states = ceph_status["pgmap"]["pgs_by_state"]
logger.info(ceph_health)
logger.info(pg_states)
    # Re-balance is complete when a single "active+clean" state accounts
    # for all PGs in the cluster; the original early return only ever
    # inspected the first state entry.
    for state in pg_states:
        if (
            state["state_name"] == "active+clean"
            and state["count"] == total_pg_count
        ):
            return True
    return False
def wait_for_rebalance(self, timeout=600):
"""
Wait for re-balance to complete
Args:
timeout (int): Time to wait for the completion of re-balance
Returns:
bool: True if rebalance completed, False otherwise
"""
try:
for rebalance in TimeoutSampler(
timeout=timeout, sleep=10, func=self.get_rebalance_status
):
if rebalance:
logging.info("Re-balance is completed")
return True
except exceptions.TimeoutExpiredError:
logger.error(
f"Data re-balance failed to complete within the given "
f"timeout of {timeout} seconds"
)
return False
def time_taken_to_complete_rebalance(self, timeout=600):
"""
This function calculates the time taken to complete
rebalance
Args:
timeout (int): Time to wait for the completion of rebalance
Returns:
int : Time taken in minutes for the completion of rebalance
"""
start_time = time.time()
assert self.wait_for_rebalance(timeout=timeout), (
f"Data re-balance failed to complete within the given "
f"timeout of {timeout} seconds"
)
time_taken = time.time() - start_time
return time_taken / 60
class CephHealthMonitor(threading.Thread):
"""
Context manager class for monitoring ceph health status of CephCluster.
If CephCluster will get to HEALTH_ERROR state it will save the ceph status
to health_error_status variable and will stop monitoring.
"""
def __init__(self, ceph_cluster, sleep=5):
"""
Constructor for ceph health status thread.
Args:
ceph_cluster (CephCluster): Reference to CephCluster object.
sleep (int): Number of seconds to sleep between health checks.
"""
self.ceph_cluster = ceph_cluster
self.sleep = sleep
self.health_error_status = None
self.health_monitor_enabled = False
self.latest_health_status = None
super(CephHealthMonitor, self).__init__()
def __exit__(self, exception_type, value, traceback):
"""
Exit method for context manager
Raises:
CephHealthException: If no other exception occurred during
execution of context manager and HEALTH_ERROR is detected
during the monitoring.
exception_type: In case of exception raised during processing of
the context manager.
"""
self.health_monitor_enabled = False
if self.health_error_status:
self.log_error_status()
if exception_type:
raise exception_type.with_traceback(value, traceback)
if self.health_error_status:
raise exceptions.CephHealthException(
f"During monitoring of Ceph health status hit HEALTH_ERROR: "
f"{self.health_error_status}"
)
return True
def validate_ocs_pods_on_pvc(pods, pvc_names):
"""
    Validate that each OCS pod has a PVC. This checks whether a PVC name is
    contained in the pod name, e.g. the PVC rook-ceph-mon-a for the pod
    rook-ceph-mon-a-56f67f5968-6j4px.
Args:
pods (list): OCS pod names
pvc_names (list): names of all PVCs
Raises:
AssertionError: If no PVC found for one of the pod
"""
logger.info(f"Validating if each pod from: {pods} has PVC from {pvc_names}.")
for pod_name in pods:
found_pvc = ""
for pvc in pvc_names:
if pvc in pod_name:
found_pvc = pvc
if found_pvc:
logger.info(f"PVC {found_pvc} found for pod {pod_name}")
continue
assert found_pvc, f"No PVC found for pod: {pod_name}!"
def validate_cluster_on_pvc():
"""
Validate creation of PVCs for MON and OSD pods.
Also validate that those PVCs are attached to the OCS pods
Raises:
AssertionError: If PVC is not mounted on one or more OCS pods
"""
# Get the PVCs for selected label (MON/OSD)
ns = config.ENV_DATA["cluster_namespace"]
ocs_pvc_obj = get_all_pvc_objs(namespace=ns)
# Check all pvc's are in bound state
pvc_names = []
for pvc_obj in ocs_pvc_obj:
if pvc_obj.name.startswith(
constants.DEFAULT_DEVICESET_PVC_NAME
) or pvc_obj.name.startswith(constants.DEFAULT_MON_PVC_NAME):
assert (
pvc_obj.status == constants.STATUS_BOUND
), f"PVC {pvc_obj.name} is not Bound"
logger.info(f"PVC {pvc_obj.name} is in Bound state")
pvc_names.append(pvc_obj.name)
mon_pods = get_pod_name_by_pattern("rook-ceph-mon", ns)
if not config.DEPLOYMENT.get("local_storage"):
logger.info("Validating all mon pods have PVC")
validate_ocs_pods_on_pvc(mon_pods, pvc_names)
else:
logger.debug(
"Skipping validation if all mon pods have PVC because in LSO "
"deployment we don't have mon pods backed by PVC"
)
logger.info("Validating all osd pods have PVC")
osd_deviceset_pods = get_pod_name_by_pattern(
"rook-ceph-osd-prepare-ocs-deviceset", ns
)
validate_ocs_pods_on_pvc(osd_deviceset_pods, pvc_names)
osd_pods = get_pod_name_by_pattern("rook-ceph-osd", ns, filter="prepare")
for ceph_pod in mon_pods + osd_pods:
out = run_cmd(f"oc -n {ns} get pods {ceph_pod} -o yaml")
out_yaml = yaml.safe_load(out)
for vol in out_yaml["spec"]["volumes"]:
if vol.get("persistentVolumeClaim"):
claimName = vol.get("persistentVolumeClaim").get("claimName")
logger.info(f"{ceph_pod} backed by pvc {claimName}")
assert claimName in pvc_names, "Ceph Internal Volume not backed by PVC"
def count_cluster_osd():
"""
The function returns the number of cluster OSDs
Returns:
osd_count (int): number of OSD pods in current cluster
"""
storage_cluster_obj = storage_cluster.StorageCluster(
resource_name=config.ENV_DATA["storage_cluster_name"],
namespace=config.ENV_DATA["cluster_namespace"],
)
storage_cluster_obj.reload_data()
osd_count = int(
storage_cluster_obj.data["spec"]["storageDeviceSets"][0]["count"]
) * int(storage_cluster_obj.data["spec"]["storageDeviceSets"][0]["replica"])
return osd_count
def validate_pdb_creation():
"""
Validate creation of PDBs for MON, MDS and OSD pods.
Raises:
AssertionError: If required PDBs were not created.
"""
pdb_obj = ocp.OCP(kind="PodDisruptionBudget")
item_list = pdb_obj.get().get("items")
pdb_list = [item["metadata"]["name"] for item in item_list]
osd_count = count_cluster_osd()
pdb_required = [constants.MDS_PDB, constants.MON_PDB]
for num in range(osd_count):
pdb_required.append(constants.OSD_PDB + str(num))
pdb_list.sort()
pdb_required.sort()
for required, given in zip(pdb_required, pdb_list):
assert required == given, f"{required} was not created"
logger.info(f"All required PDBs created: {pdb_required}")
def get_osd_utilization():
"""
Get osd utilization value
Returns:
osd_filled (dict): Dict of osd name and its used value
i.e {'osd.1': 15.276289408185841, 'osd.0': 15.276289408185841, 'osd.2': 15.276289408185841}
"""
osd_filled = {}
ceph_cmd = "ceph osd df"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
for osd in output.get("nodes"):
osd_filled[osd["name"]] = osd["utilization"]
return osd_filled
def get_ceph_df_detail():
"""
Get ceph osd df detail
Returns:
dict: 'ceph df details' command output
"""
ceph_cmd = "ceph df detail"
ct_pod = pod.get_ceph_tools_pod()
return ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
def validate_replica_data(pool_name, replica):
"""
Check if data is replica 2 or 3
Args:
replica (int): size of the replica(2,3)
pool_name (str): name of the pool to check replica
Returns:
        bool: True if the replicated data size matches the replica config, False otherwise
"""
ceph_df_detail_output = get_ceph_df_detail()
pool_list = ceph_df_detail_output.get("pools")
for pool in pool_list:
if pool.get("name") == pool_name:
logger.info(f"{pool_name}")
stored = pool["stats"]["stored"]
byte_used = pool["stats"]["bytes_used"]
compress_bytes_used = pool["stats"]["compress_bytes_used"]
compress_under_bytes = pool["stats"]["compress_under_bytes"]
byte_used = byte_used + compress_under_bytes - compress_bytes_used
store_ratio = byte_used / stored
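            # A replica-N pool stores roughly N physical bytes per logical
            # byte, so the ratio should fall within +/-0.2 of the replica
            # count.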
if (replica + 0.2) > store_ratio > (replica - 0.2):
logger.info(f"pool {pool_name} meet rep {replica} size")
return True
else:
logger.info(
f"pool {pool_name} meet do not meet rep {replica}"
f" size Store ratio is {store_ratio}"
)
return False
raise PoolNotFound(f"Pool {pool_name} not found on cluster")
def validate_compression(pool_name):
"""
Check if data was compressed
Args:
pool_name (str): name of the pool to check replica
Returns:
bool: True if compression works. False if not
"""
ceph_df_detail_output = get_ceph_df_detail()
pool_list = ceph_df_detail_output.get("pools")
for pool in pool_list:
if pool.get("name") == pool_name:
logger.info(f"{pool_name}")
byte_used = pool["stats"]["bytes_used"]
compress_bytes_used = pool["stats"]["compress_bytes_used"]
compress_under_bytes = pool["stats"]["compress_under_bytes"]
all_byte_used = byte_used + compress_under_bytes - compress_bytes_used
compression_ratio = byte_used / all_byte_used
logger.info(f"this is the comp_ratio {compression_ratio}")
if 0.6 < compression_ratio:
logger.info(
f"Compression ratio {compression_ratio} is " f"larger than 0.6"
)
return True
else:
logger.info(
f"Compression ratio {compression_ratio} is " f"smaller than 0.6"
)
return False
raise PoolNotFound(f"Pool {pool_name} not found on cluster")
def validate_osd_utilization(osd_used=80):
"""
    Validates that OSD utilization matches the osd_used value
Args:
osd_used (int): osd used value
Returns:
        bool: True if every OSD used value is equal to or greater than
        osd_used, False otherwise.
"""
_rc = True
osd_filled = get_osd_utilization()
for osd, value in osd_filled.items():
if int(value) >= osd_used:
logger.info(f"{osd} used value {value}")
else:
_rc = False
logger.warning(f"{osd} used value {value}")
return _rc
def get_pgs_per_osd():
"""
Function to get ceph pg count per OSD
Returns:
osd_dict (dict): Dict of osd name and its used value
i.e {'osd.0': 136, 'osd.2': 136, 'osd.1': 136}
"""
osd_dict = {}
ceph_cmd = "ceph osd df"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
for osd in output.get("nodes"):
osd_dict[osd["name"]] = osd["pgs"]
return osd_dict
def get_balancer_eval():
"""
Function to get ceph pg balancer eval value
Returns:
eval_out (float): Eval output of pg balancer
"""
ceph_cmd = "ceph balancer eval"
ct_pod = pod.get_ceph_tools_pod()
eval_out = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd).split(" ")
return float(eval_out[3])
def get_pg_balancer_status():
"""
    Function to check that pg_balancer is active and its mode is upmap
Returns:
bool: True if active and upmap is set else False
"""
    # Check whether the PG balancer is active or not
ceph_cmd = "ceph balancer status"
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd=ceph_cmd)
# Check 'mode' is 'upmap', based on suggestion from Ceph QE
# TODO: Revisit this if mode needs change.
if output["active"] and output["mode"] == "upmap":
logging.info("PG balancer is active and mode is upmap")
return True
else:
logging.error("PG balancer is not active")
return False
def validate_pg_balancer():
"""
    Validate whether data is equally distributed across OSDs
Returns:
bool: True if avg PG's per osd difference is <=10 else False
"""
    # Check OSD utilization only when the pg balancer is active
# TODO: Revisit this if pg difference value needs change
# TODO: Revisit eval value if pg balancer mode changes from 'upmap'
if get_pg_balancer_status():
eval = get_balancer_eval()
osd_dict = get_pgs_per_osd()
osd_avg_pg_value = round(sum(osd_dict.values()) / len(osd_dict))
osd_pg_value_flag = True
for key, value in osd_dict.items():
diff = abs(value - osd_avg_pg_value)
if diff <= 10:
logging.info(f"{key} PG difference {diff} is acceptable")
else:
logging.error(f"{key} PG difference {diff} is not acceptable")
osd_pg_value_flag = False
if osd_pg_value_flag and eval <= 0.025:
logging.info(
f"Eval value is {eval} and pg distribution "
f"average difference is <=10 which is acceptable"
)
return True
else:
logging.error(
f"Eval value is {eval} and pg distribution "
f"average difference is >=10 which is high and not acceptable"
)
return False
    else:
        logging.info("pg_balancer is not active")
        # Nothing to validate when the balancer is inactive.
        return False
def get_percent_used_capacity():
"""
Function to calculate the percentage of used capacity in a cluster
Returns:
float: The percentage of the used capacity in the cluster
"""
ct_pod = pod.get_ceph_tools_pod()
output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph df")
total_used = output.get("stats").get("total_used_raw_bytes")
total_avail = output.get("stats").get("total_bytes")
return 100.0 * total_used / total_avail
def get_osd_pods_memory_sum():
"""
Get the sum of memory of all OSD pods. This is used to determine the size
    needed for a PVC so that when IO runs over it, the OSD cache is filled
Returns:
int: The sum of the OSD pods memory in GB
"""
osd_pods = pod.get_osd_pods()
num_of_osd_pods = len(osd_pods)
osd_pod_mem_size_str = osd_pods[0].get_memory().get("osd")
osd_pod_mem_size = convert_device_size(
unformatted_size=osd_pod_mem_size_str, units_to_covert_to="GB"
)
return num_of_osd_pods * osd_pod_mem_size
def get_child_nodes_osd_tree(node_id, osd_tree):
"""
This function finds the children of a node from the 'ceph osd tree' and returns them as list
Args:
node_id (int): the id of the node for which the children to be retrieved
osd_tree (dict): dictionary containing the output of 'ceph osd tree'
Returns:
list: of 'children' of a given node_id
"""
for i in range(len(osd_tree["nodes"])):
if osd_tree["nodes"][i]["id"] == node_id:
return osd_tree["nodes"][i]["children"]
def check_osds_in_hosts_osd_tree(hosts, osd_tree):
"""
Checks if osds are formed correctly after cluster expansion
Args:
        hosts (list): List of hosts
        osd_tree (dict): Dictionary containing the output of 'ceph osd tree'
    Returns:
        bool: True if the osd tree is formed correctly
"""
for each_host in hosts:
osd_in_each_host = get_child_nodes_osd_tree(each_host, osd_tree)
        if len(osd_in_each_host) != 1:
logger.error(
"Error. ceph osd tree is NOT formed correctly after cluster expansion"
)
return False
logger.info("osd tree verification Passed")
return True
def check_osd_tree_1az_vmware(osd_tree, number_of_osds):
"""
Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
deployment and cluster expansion tests.
This function is specifically for ocs cluster created on 1 AZ VMWare setup
Args:
osd_tree (dict): Dictionary of the values which represent 'osd tree'.
number_of_osds (int): total number of osds in the cluster
Returns:
bool: True, if the ceph osd tree is formed correctly. Else False
"""
# in case of vmware, there will be only one zone as of now. The OSDs are arranged as follows:
# ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
# -1 0.99326 root default
# -8 0.33109 rack rack0
# -7 0.33109 host ocs-deviceset-0-0-dktqc
# 1 hdd 0.33109 osd.1 up 1.00000 1.00000
# There will be 3 racks - rack0, rack1, rack2.
# When cluster expansion is successfully done, a host and an osd are added in each rack.
    # The number of hosts will be equal to the number of osds the cluster has. Each rack can
# have multiple hosts but each host will have only one osd under it.
number_of_hosts_expected = int(number_of_osds / 3)
all_hosts = []
racks = osd_tree["nodes"][0]["children"]
for rack in racks:
hosts = get_child_nodes_osd_tree(rack, osd_tree)
if len(hosts) != number_of_hosts_expected:
logging.error(
f"Number of hosts under rack {rack} "
f"is not matching the expected ={number_of_hosts_expected} "
)
return False
else:
all_hosts.append(hosts)
all_hosts_flatten = [item for sublist in all_hosts for item in sublist]
return check_osds_in_hosts_osd_tree(all_hosts_flatten, osd_tree)
def check_osd_tree_3az_aws(osd_tree, number_of_osds):
"""
Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
deployment and cluster expansion tests.
This function is specifically for ocs cluster created on 3 AZ AWS config
Args:
osd_tree (dict): Dictionary of the values which represent 'osd tree'.
number_of_osds (int): total number of osds in the cluster
Returns:
        bool: True, if the ceph osd tree is formed correctly. Else False
"""
all_hosts = []
region = osd_tree["nodes"][0]["children"]
zones = get_child_nodes_osd_tree(region[0], osd_tree)
for each_zone in zones:
hosts_in_each_zone = get_child_nodes_osd_tree(each_zone, osd_tree)
if len(hosts_in_each_zone) != number_of_osds / 3: # 3 is replica_factor
logger.error("number of hosts in zone is incorrect")
return False
else:
all_hosts.append(hosts_in_each_zone)
all_hosts_flatten = [item for sublist in all_hosts for item in sublist]
return check_osds_in_hosts_osd_tree(all_hosts_flatten, osd_tree)
def check_osd_tree_1az_aws(osd_tree, number_of_osds):
"""
Checks whether an OSD tree is created/modified correctly. This can be used as a verification step for
deployment and cluster expansion tests.
This function is specifically for ocs cluster created on 1 AZ AWS config
Args:
osd_tree (dict): Dictionary of the values which represent 'osd tree'.
number_of_osds (int): total number of osds in the cluster
Returns:
        bool: True, if the ceph osd tree is formed correctly. Else False
"""
all_hosts = []
region = osd_tree["nodes"][0]["children"]
zones = get_child_nodes_osd_tree(region[0], osd_tree)
racks = get_child_nodes_osd_tree(zones[0], osd_tree)
logging.info(f"racks = {racks}")
    if len(racks) != 3:
        logging.error(f"Expected 3 racks but got {len(racks)}")
        return False
for each_rack in racks:
hosts_in_each_rack = get_child_nodes_osd_tree(each_rack, osd_tree)
if len(hosts_in_each_rack) != number_of_osds / 3: # 3 is replica_factor
logging.error("number of hosts in rack is incorrect")
return False
else:
logging.info(f"adding host...{hosts_in_each_rack}")
all_hosts.append(hosts_in_each_rack)
all_hosts_flatten = [item for sublist in all_hosts for item in sublist]
return check_osds_in_hosts_osd_tree(all_hosts_flatten, osd_tree)
def check_osds_in_hosts_are_up(osd_tree):
"""
Check if all the OSD's in status 'up'
Args:
osd_tree (dict): The ceph osd tree
Returns:
bool: True if all the OSD's in status 'up'. Else False
"""
for n in osd_tree["nodes"]:
if n["type"] == "osd":
if n["status"] != "up":
logger.warning(f"osd with name {n['name']} is not up")
return False
return True
def check_ceph_osd_tree():
"""
Checks whether an OSD tree is created/modified correctly.
    It dispatches to the platform-specific checks above: 'check_osd_tree_1az_vmware',
    'check_osd_tree_3az_aws' and 'check_osd_tree_1az_aws'.
Returns:
bool: True, if the ceph osd tree is formed correctly. Else False
"""
osd_pods = pod.get_osd_pods()
# 'ceph osd tree' should show the new osds under right nodes/hosts
# Verification is different for 3 AZ and 1 AZ configs
ct_pod = pod.get_ceph_tools_pod()
tree_output = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree")
if config.ENV_DATA["platform"].lower() == constants.VSPHERE_PLATFORM:
return check_osd_tree_1az_vmware(tree_output, len(osd_pods))
aws_number_of_zones = 3
if config.ENV_DATA["platform"].lower() == constants.AWS_PLATFORM:
        # Parse the osd tree: if it contains a 'rack' node then it's an
        # AWS 1-AZ cluster; otherwise it's an AWS 3-AZ cluster.
        for i in range(len(tree_output["nodes"])):
            if "rack" in tree_output["nodes"][i]["name"]:
aws_number_of_zones = 1
if aws_number_of_zones == 1:
return check_osd_tree_1az_aws(tree_output, len(osd_pods))
else:
return check_osd_tree_3az_aws(tree_output, len(osd_pods))
def check_ceph_osd_tree_after_node_replacement():
"""
Check the ceph osd tree after the process of node replacement.
Returns:
bool: True if the ceph osd tree formation is correct,
and all the OSD's are up. Else False
"""
ct_pod = pod.get_ceph_tools_pod()
osd_tree = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd tree")
if not check_ceph_osd_tree():
logger.warning("Incorrect ceph osd tree formation found")
return False
if not check_osds_in_hosts_are_up(osd_tree):
logger.warning("Not all the osd's are in status 'up'")
return False
return True
def silence_ceph_osd_crash_warning(osd_pod_name):
"""
Silence the osd crash warning of a specific osd pod
Args:
osd_pod_name (str): The name of the osd pod which we need to
silence the crash warning
Returns:
bool: True if it found the osd crash with name 'osd_pod_name'. False otherwise
"""
ct_pod = pod.get_ceph_tools_pod()
new_crash_objects_list = ct_pod.exec_ceph_cmd(ceph_cmd="ceph crash ls-new")
for crash_obj in new_crash_objects_list:
if crash_obj.get("utsname_hostname") == osd_pod_name:
logger.info(f"Found osd crash with name {osd_pod_name}")
obj_crash_id = crash_obj.get("crash_id")
crash_info = ct_pod.exec_ceph_cmd(
ceph_cmd=f"ceph crash info {obj_crash_id}"
)
logger.info(f"ceph crash info: {crash_info}")
logger.info("silence the osd crash warning")
ct_pod.exec_ceph_cmd(ceph_cmd=f"ceph crash archive {obj_crash_id}")
return True
logger.info(
f"Didn't find osd crash with name {osd_pod_name} in ceph crash warnings"
)
return False
def wait_for_silence_ceph_osd_crash_warning(osd_pod_name, timeout=900):
"""
Wait for 'timeout' seconds to check for the ceph osd crash warning,
and silence it.
Args:
osd_pod_name (str): The name of the osd pod which we need to
silence the crash warning
timeout (int): time in seconds to wait for silence the osd crash warning
Returns:
bool: True if it found the osd crash with name 'osd_pod_name'. False otherwise
"""
try:
for silence_old_osd_crash_warning in TimeoutSampler(
timeout=timeout,
sleep=30,
func=silence_ceph_osd_crash_warning,
osd_pod_name=osd_pod_name,
):
if silence_old_osd_crash_warning:
return True
except TimeoutError:
return False
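# Usage sketch (the pod name is illustrative, not a real resource):
#   wait_for_silence_ceph_osd_crash_warning("rook-ceph-osd-0", timeout=900)
# polls every 30 seconds via TimeoutSampler until the crash entry is archived
# or the timeout expires.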
def validate_pvc(self):
"""
Check whether all PVCs are in bound state
"""
ocs_pvc_obj = get_all_pvc_objs(namespace=self.namespace)
for pvc_obj in ocs_pvc_obj:
        assert pvc_obj.status == constants.STATUS_BOUND, (
            f"PVC {pvc_obj.name} is not Bound"
        )
logger.info(f"PVC {pvc_obj.name} is in Bound state")
| 33.56271 | 105 | 0.624675 |
e66d5e1f08dc9a4e5c8cb49651bf2a219e4f50a8
| 3,621 |
py
|
Python
|
scenic/projects/baselines/detr/configs/detr_config.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
scenic/projects/baselines/detr/configs/detr_config.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
scenic/projects/baselines/detr/configs/detr_config.py
|
techthiyanes/scenic
|
05585b1189364e29d82413b9d4a50ffa8c246f0c
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=line-too-long
r"""Default configs for COCO detection using DETR.
"""
# pylint: enable=line-too-long
import copy
import ml_collections
_COCO_TRAIN_SIZE = 118287
NUM_EPOCHS = 300
def get_config():
"""Returns the configuration for COCO detection using DETR."""
config = ml_collections.ConfigDict()
config.experiment_name = 'coco_detection_detr'
# Dataset.
config.dataset_name = 'coco_detr_detection'
config.dataset_configs = ml_collections.ConfigDict()
config.dataset_configs.prefetch_to_device = 2
config.dataset_configs.shuffle_buffer_size = 10_000
config.dataset_configs.max_boxes = 99
config.data_dtype_str = 'float32'
# Model.
config.model_dtype_str = 'float32'
config.model_name = 'detr'
config.matcher = 'hungarian_cover_tpu'
config.hidden_dim = 256
config.num_queries = 100
config.query_emb_size = None # Same as hidden_size.
config.transformer_num_heads = 8
config.transformer_num_encoder_layers = 6
config.transformer_num_decoder_layers = 6
config.transformer_qkv_dim = 256
config.transformer_mlp_dim = 2048
config.transformer_normalize_before = False
config.backbone_num_filters = 64
config.backbone_num_layers = 50
config.dropout_rate = 0.
config.attention_dropout_rate = 0.1
# Loss.
config.aux_loss = True
config.bbox_loss_coef = 5.0
config.giou_loss_coef = 2.0
config.class_loss_coef = 1.0
config.eos_coef = 0.1
# Training.
config.trainer_name = 'detr_trainer'
config.optimizer = 'adam'
config.optimizer_configs = ml_collections.ConfigDict()
config.optimizer_configs.weight_decay = 1e-4
config.optimizer_configs.beta1 = 0.9
config.optimizer_configs.beta2 = 0.999
config.max_grad_norm = 0.1
config.num_training_epochs = NUM_EPOCHS
config.batch_size = 64
config.rng_seed = 0
decay_events = {500: 400}
# Learning rate.
steps_per_epoch = _COCO_TRAIN_SIZE // config.batch_size
config.lr_configs = ml_collections.ConfigDict()
config.lr_configs.learning_rate_schedule = 'compound'
config.lr_configs.factors = 'constant*piecewise_constant'
config.lr_configs.decay_events = [
decay_events.get(NUM_EPOCHS, NUM_EPOCHS * 2 // 3) * steps_per_epoch,
]
# Note: this is absolute (not relative):
config.lr_configs.decay_factors = [.1]
config.lr_configs.base_learning_rate = 1e-4
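  # Worked example of this schedule: steps_per_epoch = 118287 // 64 = 1848;
  # for NUM_EPOCHS = 300 the decay event defaults to epoch 300 * 2 // 3 = 200,
  # i.e. step 369600, where the rate drops from 1e-4 to 1e-4 * 0.1 = 1e-5.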
# Backbone training configs: optimizer and learning rate.
config.backbone_training = ml_collections.ConfigDict()
config.backbone_training.optimizer = copy.deepcopy(config.optimizer)
config.backbone_training.optimizer_configs = copy.deepcopy(
config.optimizer_configs)
config.backbone_training.lr_configs = copy.deepcopy(config.lr_configs)
config.backbone_training.lr_configs.base_learning_rate = 1e-5
# Pretrained_backbone.
config.load_pretrained_backbone = True
config.freeze_backbone_batch_stats = True
config.pretrained_backbone_configs = ml_collections.ConfigDict()
# Download pretrained ResNet50 checkpoints from here:
# https://github.com/google-research/scenic/tree/main/scenic/projects/baselines pylint: disable=line-too-long
config.pretrained_backbone_configs.checkpoint_path = 'path_to_checkpoint_of_resnet_50'
# Logging.
config.write_summary = True
config.xprof = True # Profile using xprof.
config.log_summary_steps = 50 # train summary steps
config.log_large_summary_steps = 1000 # Expensive summary operations freq
config.checkpoint = True # Do checkpointing.
config.checkpoint_steps = steps_per_epoch
config.debug_train = False # Debug mode during training.
config.debug_eval = False # Debug mode during eval.
return config
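# Minimal usage sketch (outside of Scenic's launcher, purely illustrative):
#   cfg = get_config()
#   assert cfg.num_training_epochs == 300 and cfg.batch_size == 64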
| 34.160377 | 111 | 0.775753 |
e66dd75ae0bf7e3d43a0a0b5833ef2c98e86a332
| 581 |
py
|
Python
|
tests/conftest.py
|
artembashlak/share-youtube-to-mail
|
347f72ed8846b85cae8e4f39896ab54e698a6de9
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
artembashlak/share-youtube-to-mail
|
347f72ed8846b85cae8e4f39896ab54e698a6de9
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
artembashlak/share-youtube-to-mail
|
347f72ed8846b85cae8e4f39896ab54e698a6de9
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
| 30.578947 | 79 | 0.753873 |
e66dd9b0c4524178c41ae4349d387915dbfbc5a0
| 2,105 |
py
|
Python
|
prepare_cicero_peaks.py
|
lab-medvedeva/SCABFA-feature-selection
|
d5cd7568e667a75f75e753d9ab9dc645f3166902
|
[
"MIT"
] | null | null | null |
prepare_cicero_peaks.py
|
lab-medvedeva/SCABFA-feature-selection
|
d5cd7568e667a75f75e753d9ab9dc645f3166902
|
[
"MIT"
] | null | null | null |
prepare_cicero_peaks.py
|
lab-medvedeva/SCABFA-feature-selection
|
d5cd7568e667a75f75e753d9ab9dc645f3166902
|
[
"MIT"
] | null | null | null |
from scale.dataset import read_mtx
from argparse import ArgumentParser
import pandas as pd
import numpy as np
import os
if __name__ == '__main__':
main()
| 40.480769 | 113 | 0.669359 |
e66e53547faa705c9a68f28dba07b4048f2f1b31
| 2,335 |
py
|
Python
|
crusoe_observe/neo4j-client/neo4jclient/CMSClient.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | 3 |
2021-11-09T09:55:17.000Z
|
2022-02-19T02:58:27.000Z
|
crusoe_observe/neo4j-client/neo4jclient/CMSClient.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | null | null | null |
crusoe_observe/neo4j-client/neo4jclient/CMSClient.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | null | null | null |
from neo4jclient.AbsClient import AbstractClient
| 39.576271 | 117 | 0.576017 |
e66ec2107d63dfd849c5ad20ff3a6280caaa39d1
| 604 |
py
|
Python
|
location.py
|
jonasjucker/wildlife-telegram
|
5fb548d3779782467247cf5d1e165d1c2349de30
|
[
"MIT"
] | null | null | null |
location.py
|
jonasjucker/wildlife-telegram
|
5fb548d3779782467247cf5d1e165d1c2349de30
|
[
"MIT"
] | null | null | null |
location.py
|
jonasjucker/wildlife-telegram
|
5fb548d3779782467247cf5d1e165d1c2349de30
|
[
"MIT"
] | null | null | null |
import time
from datetime import date,datetime
from astral import LocationInfo
from astral.sun import sun
| 27.454545 | 80 | 0.652318 |
e66eceebc9bb0cd90db3c066088340ee6f011e6e
| 545 |
py
|
Python
|
lang/py/cookbook/v2/source/cb2_20_9_exm_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_20_9_exm_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
lang/py/cookbook/v2/source/cb2_20_9_exm_1.py
|
ch1huizong/learning
|
632267634a9fd84a5f5116de09ff1e2681a6cc85
|
[
"MIT"
] | null | null | null |
sk = Skidoo()
| 41.923077 | 70 | 0.702752 |
e66fe14aa361b0d83b0ed955a7d77eeda49c3b80
| 571 |
py
|
Python
|
face2anime/nb_utils.py
|
davidleonfdez/face2anime
|
896bf85a7aa28322cc9e9e586685db8cbbf39d89
|
[
"MIT"
] | null | null | null |
face2anime/nb_utils.py
|
davidleonfdez/face2anime
|
896bf85a7aa28322cc9e9e586685db8cbbf39d89
|
[
"MIT"
] | 1 |
2022-01-15T23:57:33.000Z
|
2022-01-15T23:57:33.000Z
|
face2anime/nb_utils.py
|
davidleonfdez/face2anime
|
896bf85a7aa28322cc9e9e586685db8cbbf39d89
|
[
"MIT"
] | null | null | null |
import importlib
__all__ = ['mount_gdrive']
def mount_gdrive() -> str:
"""Mount Google Drive storage of the current Google account and return the root path.
    Functionality only available in Google Colab Environment; otherwise, it raises a RuntimeError.
"""
if (importlib.util.find_spec("google.colab") is None):
raise RuntimeError("Cannot mount Google Drive outside of Google Colab.")
from google.colab import drive
drive.mount('/content/gdrive', force_remount=True)
root_dir = "/content/gdrive/My Drive/"
return root_dir
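# Usage sketch (works only inside Google Colab; output path per the docstring):
#   root = mount_gdrive()   # triggers the Drive authorization prompt
#   print(root)             # -> "/content/gdrive/My Drive/"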
| 28.55 | 97 | 0.712785 |
e670e0b486388fd350ec3090250f4bbe49211d07
| 6,225 |
py
|
Python
|
wasch/tests.py
|
waschag-tvk/pywaschedv
|
8f0428827c4c1c7e9462eaa94ba02290db1c340f
|
[
"MIT"
] | 1 |
2020-01-17T16:35:10.000Z
|
2020-01-17T16:35:10.000Z
|
wasch/tests.py
|
waschag-tvk/pywaschedv
|
8f0428827c4c1c7e9462eaa94ba02290db1c340f
|
[
"MIT"
] | 6 |
2018-06-01T15:02:11.000Z
|
2018-09-04T15:33:05.000Z
|
wasch/tests.py
|
waschag-tvk/pywaschedv
|
8f0428827c4c1c7e9462eaa94ba02290db1c340f
|
[
"MIT"
] | null | null | null |
import datetime
from django.utils import timezone
from django.test import TestCase
from django.contrib.auth.models import (
User,
)
from wasch.models import (
Appointment,
WashUser,
WashParameters,
# not models:
AppointmentError,
StatusRights,
)
from wasch import tvkutils, payment
| 42.060811 | 79 | 0.676948 |
e671c98e986dfbf41b15884e3c4cc078b893ecb2
| 1,040 |
py
|
Python
|
Python/problem1150.py
|
1050669722/LeetCode-Answers
|
c8f4d1ccaac09cda63b60d75144335347b06dc81
|
[
"MIT"
] | null | null | null |
Python/problem1150.py
|
1050669722/LeetCode-Answers
|
c8f4d1ccaac09cda63b60d75144335347b06dc81
|
[
"MIT"
] | null | null | null |
Python/problem1150.py
|
1050669722/LeetCode-Answers
|
c8f4d1ccaac09cda63b60d75144335347b06dc81
|
[
"MIT"
] | null | null | null |
from typing import List
from collections import Counter
# class Solution:
# def isMajorityElement(self, nums: List[int], target: int) -> bool:
# d = Counter(nums)
# return d[target] > len(nums)//2
# class Solution:
# def isMajorityElement(self, nums: List[int], target: int) -> bool:
# ans = 0
# for num in nums:
# if num == target:
# ans += 1
#         return ans > len(nums)//2
| 25.365854 | 72 | 0.476923 |
e672d8fb22849a3e49b4cf1505ef89fb8d62430d
| 2,018 |
py
|
Python
|
day17/module.py
|
arcadecoffee/advent-2021
|
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
|
[
"MIT"
] | null | null | null |
day17/module.py
|
arcadecoffee/advent-2021
|
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
|
[
"MIT"
] | null | null | null |
day17/module.py
|
arcadecoffee/advent-2021
|
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
|
[
"MIT"
] | null | null | null |
"""
Advent of Code 2021 - Day 17
https://adventofcode.com/2021/day/17
"""
import re
from math import ceil, sqrt
from typing import List, Tuple
DAY = 17
FULL_INPUT_FILE = f'../inputs/day{DAY:02d}/input.full.txt'
TEST_INPUT_FILE = f'../inputs/day{DAY:02d}/input.test.txt'
if __name__ == '__main__':
part1_answer = part_1(FULL_INPUT_FILE)
print(f'Part 1: {part1_answer}')
part2_answer = part_2(FULL_INPUT_FILE)
print(f'Part 2: {part2_answer}')
| 29.246377 | 92 | 0.617939 |
e67406a638efa86479227542aee6a924595e4826
| 4,235 |
py
|
Python
|
src/main/python/depysible/domain/rete.py
|
stefano-bragaglia/DePYsible
|
6b53ede459a10f5e24da89d3ebaa05f08ec7af12
|
[
"BSD-2-Clause"
] | 4 |
2018-09-24T23:51:05.000Z
|
2021-01-06T09:13:52.000Z
|
src/main/python/depysible/domain/rete.py
|
stefano-bragaglia/DefeasiblePython
|
6b53ede459a10f5e24da89d3ebaa05f08ec7af12
|
[
"BSD-2-Clause"
] | 1 |
2020-05-26T01:14:44.000Z
|
2020-05-27T07:54:15.000Z
|
src/main/python/depysible/domain/rete.py
|
stefano-bragaglia/DePYsible
|
6b53ede459a10f5e24da89d3ebaa05f08ec7af12
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
Payload = Tuple[List['Literal'], 'Substitutions']
def fire_rules(program: 'Program') -> List['Rule']:
if program.is_ground():
return program
rules = []
table = {}
root = Root()
for rule in program.rules:
if rule.is_fact():
rules.append(rule)
else:
beta = None
for lit in rule.body:
name = repr(lit)
alfa = table.setdefault(name, Alfa(lit, root))
if beta is None:
beta = alfa
else:
name = '%s, %s' % (beta.name, alfa.name)
beta = table.setdefault(name, Beta(beta, alfa))
Leaf(rule, beta, root, rules)
for fact in program.get_facts():
root.notify(fact.head)
return rules
| 32.083333 | 100 | 0.563872 |
e6751ce031099f22bcc8f169d0324a7aff0147ed
| 15,501 |
py
|
Python
|
pythonbot_1.0/GameData.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | 1 |
2017-01-18T21:25:21.000Z
|
2017-01-18T21:25:21.000Z
|
pythonbot_1.0/GameData.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | null | null | null |
pythonbot_1.0/GameData.py
|
jeffreyzli/pokerbot-2017
|
df2aa31d6aaf0e3162d24ae5f4c2a918ab19831f
|
[
"MIT"
] | 3 |
2017-02-06T04:35:02.000Z
|
2020-03-08T18:56:25.000Z
|
import HandRankings as Hand
from deuces.deuces import Card, Evaluator
| 43.298883 | 102 | 0.48055 |
e675854ddbd73f687dc3955ba80c468d17bec3c4
| 801 |
py
|
Python
|
todo/models.py
|
zyayoung/share-todo
|
84813545f9aa3e89441c560e64e85bc799835d30
|
[
"MIT"
] | null | null | null |
todo/models.py
|
zyayoung/share-todo
|
84813545f9aa3e89441c560e64e85bc799835d30
|
[
"MIT"
] | null | null | null |
todo/models.py
|
zyayoung/share-todo
|
84813545f9aa3e89441c560e64e85bc799835d30
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
| 26.7 | 63 | 0.657928 |
e675c9e19056933d226c148a0c8e55351caf07f1
| 20,377 |
py
|
Python
|
examples/Tutorial/Example/app.py
|
DrewLazzeriKitware/trame
|
fdc73f07f17d2601e1b1d3934d2d6326a3c0281e
|
[
"BSD-3-Clause"
] | null | null | null |
examples/Tutorial/Example/app.py
|
DrewLazzeriKitware/trame
|
fdc73f07f17d2601e1b1d3934d2d6326a3c0281e
|
[
"BSD-3-Clause"
] | null | null | null |
examples/Tutorial/Example/app.py
|
DrewLazzeriKitware/trame
|
fdc73f07f17d2601e1b1d3934d2d6326a3c0281e
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from trame import change, update_state
from trame.layouts import SinglePageWithDrawer
from trame.html import vtk, vuetify, widgets
from vtkmodules.vtkCommonDataModel import vtkDataObject
from vtkmodules.vtkFiltersCore import vtkContourFilter
from vtkmodules.vtkIOXML import vtkXMLUnstructuredGridReader
from vtkmodules.vtkRenderingAnnotation import vtkCubeAxesActor
from vtkmodules.vtkRenderingCore import (
vtkActor,
vtkDataSetMapper,
vtkRenderer,
vtkRenderWindow,
vtkRenderWindowInteractor,
)
# Required for interacter factory initialization
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleSwitch # noqa
# Required for remote rendering factory initialization, not necessary for
# local rendering, but doesn't hurt to include it
import vtkmodules.vtkRenderingOpenGL2 # noqa
CURRENT_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# VTK pipeline
# -----------------------------------------------------------------------------
renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
# Read Data
reader = vtkXMLUnstructuredGridReader()
reader.SetFileName(os.path.join(CURRENT_DIRECTORY, "../data/disk_out_ref.vtu"))
reader.Update()
# Extract Array/Field information
dataset_arrays = []
fields = [
(reader.GetOutput().GetPointData(), vtkDataObject.FIELD_ASSOCIATION_POINTS),
(reader.GetOutput().GetCellData(), vtkDataObject.FIELD_ASSOCIATION_CELLS),
]
for field in fields:
field_arrays, association = field
for i in range(field_arrays.GetNumberOfArrays()):
array = field_arrays.GetArray(i)
array_range = array.GetRange()
dataset_arrays.append(
{
"text": array.GetName(),
"value": i,
"range": list(array_range),
"type": association,
}
)
default_array = dataset_arrays[0]
default_min, default_max = default_array.get("range")
# Mesh
mesh_mapper = vtkDataSetMapper()
mesh_mapper.SetInputConnection(reader.GetOutputPort())
mesh_actor = vtkActor()
mesh_actor.SetMapper(mesh_mapper)
renderer.AddActor(mesh_actor)
# Mesh: Setup default representation to surface
mesh_actor.GetProperty().SetRepresentationToSurface()
mesh_actor.GetProperty().SetPointSize(1)
mesh_actor.GetProperty().EdgeVisibilityOff()
# Mesh: Apply rainbow color map
mesh_lut = mesh_mapper.GetLookupTable()
mesh_lut.SetHueRange(0.666, 0.0)
mesh_lut.SetSaturationRange(1.0, 1.0)
mesh_lut.SetValueRange(1.0, 1.0)
mesh_lut.Build()
# Mesh: Color by default array
mesh_mapper.SelectColorArray(default_array.get("text"))
mesh_mapper.GetLookupTable().SetRange(default_min, default_max)
if default_array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
mesh_mapper.SetScalarModeToUsePointFieldData()
else:
mesh_mapper.SetScalarModeToUseCellFieldData()
mesh_mapper.SetScalarVisibility(True)
mesh_mapper.SetUseLookupTableScalarRange(True)
# Contour
contour = vtkContourFilter()
contour.SetInputConnection(reader.GetOutputPort())
contour_mapper = vtkDataSetMapper()
contour_mapper.SetInputConnection(contour.GetOutputPort())
contour_actor = vtkActor()
contour_actor.SetMapper(contour_mapper)
renderer.AddActor(contour_actor)
# Contour: ContourBy default array
contour_value = 0.5 * (default_max + default_min)
contour.SetInputArrayToProcess(
0, 0, 0, default_array.get("type"), default_array.get("text")
)
contour.SetValue(0, contour_value)
# Contour: Setup default representation to surface
contour_actor.GetProperty().SetRepresentationToSurface()
contour_actor.GetProperty().SetPointSize(1)
contour_actor.GetProperty().EdgeVisibilityOff()
# Contour: Apply rainbow color map
contour_lut = contour_mapper.GetLookupTable()
contour_lut.SetHueRange(0.666, 0.0)
contour_lut.SetSaturationRange(1.0, 1.0)
contour_lut.SetValueRange(1.0, 1.0)
contour_lut.Build()
# Contour: Color by default array
contour_mapper.GetLookupTable().SetRange(default_min, default_max)
contour_mapper.SelectColorArray(default_array.get("text"))
if default_array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
contour_mapper.SetScalarModeToUsePointFieldData()
else:
contour_mapper.SetScalarModeToUseCellFieldData()
contour_mapper.SetScalarVisibility(True)
contour_mapper.SetUseLookupTableScalarRange(True)
# Cube Axes
cube_axes = vtkCubeAxesActor()
renderer.AddActor(cube_axes)
# Cube Axes: Boundaries, camera, and styling
cube_axes.SetBounds(mesh_actor.GetBounds())
cube_axes.SetCamera(renderer.GetActiveCamera())
cube_axes.SetXLabelFormat("%6.1f")
cube_axes.SetYLabelFormat("%6.1f")
cube_axes.SetZLabelFormat("%6.1f")
cube_axes.SetFlyModeToOuterEdges()
renderer.ResetCamera()
# -----------------------------------------------------------------------------
# trame Views
# -----------------------------------------------------------------------------
local_view = vtk.VtkLocalView(renderWindow)
remote_view = vtk.VtkRemoteView(renderWindow, interactive_ratio=1)
html_view = local_view
# -----------------------------------------------------------------------------
# Callbacks
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Toolbar Callbacks
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Representation Callbacks
# -----------------------------------------------------------------------------
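# Note: the Representation and LookupTable constant holders referenced below
# are defined elsewhere in the full tutorial source and are elided from this
# excerpt.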
def update_representation(actor, mode):
property = actor.GetProperty()
if mode == Representation.Points:
property.SetRepresentationToPoints()
property.SetPointSize(5)
property.EdgeVisibilityOff()
elif mode == Representation.Wireframe:
property.SetRepresentationToWireframe()
property.SetPointSize(1)
property.EdgeVisibilityOff()
elif mode == Representation.Surface:
property.SetRepresentationToSurface()
property.SetPointSize(1)
property.EdgeVisibilityOff()
elif mode == Representation.SurfaceWithEdges:
property.SetRepresentationToSurface()
property.SetPointSize(1)
property.EdgeVisibilityOn()
# -----------------------------------------------------------------------------
# ColorBy Callbacks
# -----------------------------------------------------------------------------
def color_by_array(actor, array):
_min, _max = array.get("range")
mapper = actor.GetMapper()
mapper.SelectColorArray(array.get("text"))
mapper.GetLookupTable().SetRange(_min, _max)
if array.get("type") == vtkDataObject.FIELD_ASSOCIATION_POINTS:
mesh_mapper.SetScalarModeToUsePointFieldData()
else:
mesh_mapper.SetScalarModeToUseCellFieldData()
mapper.SetScalarModeToUsePointFieldData()
mapper.SetScalarVisibility(True)
mapper.SetUseLookupTableScalarRange(True)
# -----------------------------------------------------------------------------
# ColorMap Callbacks
# -----------------------------------------------------------------------------
def use_preset(actor, preset):
lut = actor.GetMapper().GetLookupTable()
if preset == LookupTable.Rainbow:
lut.SetHueRange(0.666, 0.0)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
elif preset == LookupTable.Inverted_Rainbow:
lut.SetHueRange(0.0, 0.666)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
elif preset == LookupTable.Greyscale:
lut.SetHueRange(0.0, 0.0)
lut.SetSaturationRange(0.0, 0.0)
lut.SetValueRange(0.0, 1.0)
elif preset == LookupTable.Inverted_Greyscale:
lut.SetHueRange(0.0, 0.666)
lut.SetSaturationRange(0.0, 0.0)
lut.SetValueRange(1.0, 0.0)
lut.Build()
# -----------------------------------------------------------------------------
# Opacity Callbacks
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Contour Callbacks
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Pipeline Widget Callbacks
# -----------------------------------------------------------------------------
# Selection Change
def actives_change(ids):
_id = ids[0]
if _id == "1": # Mesh
update_state("active_ui", "mesh")
elif _id == "2": # Contour
update_state("active_ui", "contour")
else:
update_state("active_ui", "nothing")
# Visibility Change
# -----------------------------------------------------------------------------
# GUI Toolbar Buttons
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# GUI Pipelines Widget
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# GUI Cards
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
layout = SinglePageWithDrawer("Viewer", on_ready=update_view)
layout.title.set_text("Viewer")
with layout.toolbar:
# toolbar components
vuetify.VSpacer()
vuetify.VDivider(vertical=True, classes="mx-2")
standard_buttons()
with layout.drawer as drawer:
# drawer components
drawer.width = 325
pipeline_widget()
vuetify.VDivider(classes="mb-2")
mesh_card()
contour_card()
with layout.content:
# content components
vuetify.VContainer(
fluid=True,
classes="pa-0 fill-height",
children=[html_view],
)
# State use to track active ui card
layout.state = {
"active_ui": None,
}
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
layout.start()
| 31.739875 | 81 | 0.55332 |
e6773e141755afe2a0e2167251aa0bc85bd1863f
| 2,849 |
py
|
Python
|
webots_ros2_tutorials/webots_ros2_tutorials/master.py
|
AleBurzio11/webots_ros2
|
99fa4a1a9d467e4ba71eff17ddf4e82444c78938
|
[
"Apache-2.0"
] | 1 |
2021-09-09T13:11:15.000Z
|
2021-09-09T13:11:15.000Z
|
webots_ros2_tutorials/webots_ros2_tutorials/master.py
|
fmrico/webots_ros2
|
38d88e01fe174a8a00731f554f1a8646b9127bd2
|
[
"Apache-2.0"
] | 1 |
2021-07-08T08:29:26.000Z
|
2021-10-01T07:57:12.000Z
|
webots_ros2_tutorials/webots_ros2_tutorials/master.py
|
fmrico/webots_ros2
|
38d88e01fe174a8a00731f554f1a8646b9127bd2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1996-2021 Soft_illusion.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rclpy
from rclpy.node import Node
from std_msgs.msg import Float64
from geometry_msgs.msg import Twist
if __name__ == '__main__':
main()
| 29.677083 | 88 | 0.657073 |
e6774fb2431795bf70a9da58c9b195ced57c3c9e
| 839 |
py
|
Python
|
dev-template/src/mysql_connect_sample.py
|
arrowkato/pytest-CircleiCI
|
2f6a1460a48bf88547538cfc72880a9c86f9ec23
|
[
"MIT"
] | null | null | null |
dev-template/src/mysql_connect_sample.py
|
arrowkato/pytest-CircleiCI
|
2f6a1460a48bf88547538cfc72880a9c86f9ec23
|
[
"MIT"
] | 10 |
2020-08-24T00:25:06.000Z
|
2020-11-08T03:58:48.000Z
|
dev-template/src/mysql_connect_sample.py
|
arrowkato/pytest-CircleiCI
|
2f6a1460a48bf88547538cfc72880a9c86f9ec23
|
[
"MIT"
] | null | null | null |
import mysql.connector
from mysql.connector import errorcode
config = {
'user': 'user',
'password': 'password',
'host': 'mysql_container',
'database': 'sample_db',
'port': '3306',
}
if __name__ == "__main__":
try:
conn = mysql.connector.connect(**config)
cursor = conn.cursor()
cursor.execute('select * from users')
for row in cursor.fetchall():
print("name:" + str(row[0]) + "" + "time_zone_id" + str(row[1]))
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
conn.close()
| 28.931034 | 76 | 0.587604 |
e677b427e6603c8fe21acf94f00727cd3ed74b7a
| 920 |
py
|
Python
|
Mundo 1/ex011.py
|
viniciusbonito/CeV-Python-Exercicios
|
6182421332f6f0c0a567c3e125fdc05736fa6281
|
[
"MIT"
] | null | null | null |
Mundo 1/ex011.py
|
viniciusbonito/CeV-Python-Exercicios
|
6182421332f6f0c0a567c3e125fdc05736fa6281
|
[
"MIT"
] | null | null | null |
Mundo 1/ex011.py
|
viniciusbonito/CeV-Python-Exercicios
|
6182421332f6f0c0a567c3e125fdc05736fa6281
|
[
"MIT"
] | null | null | null |
# Create a program that asks for the dimensions of a wall, calculates its area
# and reports how many liters of paint would be needed for the job, after
# asking for the paint coverage stated on the can
print('=' * 40)
print('{:^40}'.format('Assistente de pintura'))
print('=' * 40)
altura = float(input('Informe a altura da parede em metros: '))
largura = float(input('Informe a largura da parede em metros: '))
area = altura * largura
print('\nA área total da parede é de {:.2f}m²'.format(area))
litros = float(input('\nQuantos litros contém a lata de tinta escolhida? '))
rendlata = float(input('Qual o rendimento em metros informado na lata? '))
rendlitro = rendlata / litros
print('\nSe a lata possui {:.2f}L e rende {:.2f}m²'.format(litros, rendlata))
print('então o rendimento por litro é de {:.2f}m²'.format(rendlitro))
print('\nSerão necessários {:.2f}L para pintar toda a parede'.format(area / rendlitro))
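# Worked example (illustrative numbers): a 2m x 5m wall has an area of 10m²;
# an 18L can rated for 120m² covers 120 / 18 = 6.67m² per liter, so the wall
# needs 10 / 6.67 = 1.50L of paint.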
| 46 | 111 | 0.723913 |
e677b75c2a6dcc29dc727e2cdc804229c99df35d
| 591 |
py
|
Python
|
Python/Mundo 3/ex088.py
|
henrique-tavares/Coisas
|
f740518b1bedec5b0ea8c12ae07a2cac21eb51ae
|
[
"MIT"
] | 1 |
2020-02-07T20:39:26.000Z
|
2020-02-07T20:39:26.000Z
|
Python/Mundo 3/ex088.py
|
neptune076/Coisas
|
85c064cc0e134465aaf6ef41acf747d47f108fc9
|
[
"MIT"
] | null | null | null |
Python/Mundo 3/ex088.py
|
neptune076/Coisas
|
85c064cc0e134465aaf6ef41acf747d47f108fc9
|
[
"MIT"
] | null | null | null |
from random import sample
from time import sleep
jogos = list()
print('-' * 20)
print(f'{"MEGA SENA":^20}')
print('-' * 20)
while True:
n = int(input("\nQuatos jogos voc quer que eu sorteie? "))
if (n > 0):
break
print('\n[ERRO] Valor fora do intervalo')
print()
print('-=' * 3, end=' ')
print(f'SORTEANDO {n} JOGOS', end=' ')
print('-=' * 3)
for i in range(n):
jogos.append(sample(range(1,61), 6))
sleep(0.6)
print(f'Jogo {i+1}: {jogos[i]}')
print('-=' * 5, end=' ')
print('< BOA SORTE >', end=' ')
print('-=' * 3, end='\n\n')
| 17.909091 | 63 | 0.527919 |
e6783e2d5b99dd220ab72c9c82dce296b3c378e7
| 49,475 |
py
|
Python
|
tests/test_utils.py
|
django-roles-access/master
|
066d0d6b99b986eacc736e6973b415cbb9172d46
|
[
"MIT"
] | 5 |
2019-03-22T08:08:25.000Z
|
2019-04-11T11:46:52.000Z
|
tests/test_utils.py
|
django-roles-access/master
|
066d0d6b99b986eacc736e6973b415cbb9172d46
|
[
"MIT"
] | 5 |
2019-04-03T21:53:52.000Z
|
2019-05-22T22:41:34.000Z
|
tests/test_utils.py
|
django-roles-access/master
|
066d0d6b99b986eacc736e6973b415cbb9172d46
|
[
"MIT"
] | null | null | null |
from importlib import import_module
from unittest import TestCase as UnitTestCase
from django.contrib.auth.models import Group
from django.core.management import BaseCommand
from django.conf import settings
from django.test import TestCase
from django.views.generic import TemplateView
try:
from unittest.mock import Mock, patch, MagicMock
except:
from mock import Mock, patch
from django_roles_access.decorator import access_by_role
from django_roles_access.mixin import RolesMixin
from django_roles_access.models import ViewAccess
from tests import views
from django_roles_access.utils import (walk_site_url, get_views_by_app,
view_access_analyzer,
get_view_analyze_report,
check_django_roles_is_used,
analyze_by_role, APP_NAME_FOR_NONE,
NOT_SECURED_DEFAULT, SECURED_DEFAULT,
PUBLIC_DEFAULT, NONE_TYPE_DEFAULT,
DISABLED_DEFAULT, OutputReport)
| 40.453802 | 85 | 0.661607 |
e67851bbe8e0d15c96340d34374c9950c15106d4
| 13,892 |
py
|
Python
|
favorite_files.py
|
jasondavis/FavoriteFiles
|
be088259ac36383399eebe85d8d5b35e235d25b0
|
[
"MIT",
"Unlicense"
] | 1 |
2019-04-27T20:13:19.000Z
|
2019-04-27T20:13:19.000Z
|
favorite_files.py
|
jasondavis/FavoriteFiles
|
be088259ac36383399eebe85d8d5b35e235d25b0
|
[
"MIT",
"Unlicense"
] | null | null | null |
favorite_files.py
|
jasondavis/FavoriteFiles
|
be088259ac36383399eebe85d8d5b35e235d25b0
|
[
"MIT",
"Unlicense"
] | null | null | null |
'''
Favorite Files
Licensed under MIT
Copyright (c) 2012 Isaac Muse <[email protected]>
'''
import sublime
import sublime_plugin
from os.path import join, exists, normpath
from favorites import Favorites
Favs = Favorites(join(sublime.packages_path(), 'User', 'favorite_files_list.json'))
| 37.444744 | 136 | 0.500864 |
e678937ffa958feedad60c6818f9966146fc7fd7
| 229 |
py
|
Python
|
tests/list/list03.py
|
ktok07b6/polyphony
|
657c5c7440520db6b4985970bd50547407693ac4
|
[
"MIT"
] | 83 |
2015-11-30T09:59:13.000Z
|
2021-08-03T09:12:28.000Z
|
tests/list/list03.py
|
jesseclin/polyphony
|
657c5c7440520db6b4985970bd50547407693ac4
|
[
"MIT"
] | 4 |
2017-02-10T01:43:11.000Z
|
2020-07-14T03:52:25.000Z
|
tests/list/list03.py
|
jesseclin/polyphony
|
657c5c7440520db6b4985970bd50547407693ac4
|
[
"MIT"
] | 11 |
2016-11-18T14:39:15.000Z
|
2021-02-23T10:05:20.000Z
|
from polyphony import testbench
test()
| 14.3125 | 31 | 0.515284 |
e679989ea74254d7fd372bced3748665b5351845
| 4,361 |
py
|
Python
|
sc2clanman/views.py
|
paskausks/sc2cm
|
9c80e581933531496333d4a54c40174d4fb583a5
|
[
"MIT"
] | null | null | null |
sc2clanman/views.py
|
paskausks/sc2cm
|
9c80e581933531496333d4a54c40174d4fb583a5
|
[
"MIT"
] | null | null | null |
sc2clanman/views.py
|
paskausks/sc2cm
|
9c80e581933531496333d4a54c40174d4fb583a5
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
from collections import Counter
from django.conf import settings
from django.contrib.auth.decorators import login_required, permission_required
from django.db import models as dm
from django.shortcuts import get_object_or_404, render
from django.views.generic.list import BaseListView
from django.views.generic import TemplateView
from django.utils.decorators import method_decorator
from . import models, apps, sc2, mixins
| 33.037879 | 110 | 0.661546 |
e67abeee75de516885fc3f200a8feafafe7fd320
| 2,313 |
py
|
Python
|
manimlib/mobject/functions.py
|
parmentelat/manim
|
f05f94fbf51c70591bed3092587a5db0de439738
|
[
"MIT"
] | 1 |
2021-02-04T12:54:36.000Z
|
2021-02-04T12:54:36.000Z
|
manimlib/mobject/functions.py
|
parmentelat/manim
|
f05f94fbf51c70591bed3092587a5db0de439738
|
[
"MIT"
] | null | null | null |
manimlib/mobject/functions.py
|
parmentelat/manim
|
f05f94fbf51c70591bed3092587a5db0de439738
|
[
"MIT"
] | null | null | null |
from manimlib.constants import *
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.config_ops import digest_config
from manimlib.utils.space_ops import get_norm
| 31.684932 | 90 | 0.609166 |
e67af792ae036b2a2bc22a1b166e10db5dcc3d7e
| 9,704 |
py
|
Python
|
lib/ecsmate/ecs.py
|
doudoudzj/ecsmate
|
dda508a64ef9d6979dcc83377bb007d2a0acec30
|
[
"Apache-2.0"
] | null | null | null |
lib/ecsmate/ecs.py
|
doudoudzj/ecsmate
|
dda508a64ef9d6979dcc83377bb007d2a0acec30
|
[
"Apache-2.0"
] | null | null | null |
lib/ecsmate/ecs.py
|
doudoudzj/ecsmate
|
dda508a64ef9d6979dcc83377bb007d2a0acec30
|
[
"Apache-2.0"
] | null | null | null |
#-*- coding: utf-8 -*-
#
# Copyright (c) 2012, ECSMate development team
# All rights reserved.
#
# ECSMate is distributed under the terms of the (new) BSD License.
# The full license can be found in 'LICENSE.txt'.
"""ECS SDK
"""
import time
import hmac
import base64
import hashlib
import urllib
import json
import inspect
from random import random
if __name__ == '__main__':
import pprint
pp = pprint.PrettyPrinter(indent=4)
AccessKeyID = ''
AccessKeySecret = ''
ecs = ECS(AccessKeyID, AccessKeySecret)
if 0:
print '## Regions\n'
regions = ecs.DescribeRegions()[1]
pp.pprint(regions)
print
for region in regions['Regions']:
print '## Zones in %s\n' % region['RegionCode']
zones = ecs.DescribeZones(region['RegionCode'])
if not zones[0]:
pp.pprint(zones)
continue
zones = zones[1]
pp.pprint(zones)
print
for zone in zones['Zones']:
print '## Instances in %s\n' % zone['ZoneCode']
instances = ecs.DescribeInstanceStatus(region['RegionCode'], zone['ZoneCode'])[1]
pp.pprint(instances)
print
print
#pp.pprint(ecs.DescribeInstanceStatus(PageSize=10, PageNumber=1))
#pp.pprint(ecs.DescribeInstanceStatus('cn-hangzhou-dg-a01', 'cn-hangzhou-dg101-a'))
#pp.pprint(ecs.StartInstance('AY1209220917063704221'))
#pp.pprint(ecs.StopInstance('AY1209220917063704221'))
#pp.pprint(ecs.RebootInstance('AY1209220917063704221'))
#pp.pprint(ecs.DescribeInstanceAttribute('AY1209220917063704221'))
#pp.pprint(ecs.DescribeImages(PageSize=10, PageNumber=9))
#pp.pprint(ecs.DescribeDisks('AY1209220917063704221'))
#pp.pprint(ecs.DescribeSnapshots('AY1209220917063704221', '1006-60002839'))
| 36.344569 | 102 | 0.622424 |
e67c1789de35ce33eb29e291ba0e431b4c1c574b
| 4,002 |
py
|
Python
|
tacker/api/v1/resource.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
tacker/api/v1/resource.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
tacker/api/v1/resource.py
|
mail2nsrajesh/tacker
|
dce6690659836c2885f1cf8227c19be234f8fe25
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers redux
"""
from oslo_log import log as logging
import webob.dec
from tacker.api import api_common
from tacker import wsgi
LOG = logging.getLogger(__name__)
def Resource(controller, faults=None, deserializers=None, serializers=None):
"""API entity resource.
Represents an API entity resource and the associated serialization and
deserialization logic
"""
default_deserializers = {'application/json': wsgi.JSONDeserializer()}
default_serializers = {'application/json': wsgi.JSONDictSerializer()}
format_types = {'json': 'application/json'}
action_status = dict(create=201, delete=204)
default_deserializers.update(deserializers or {})
default_serializers.update(serializers or {})
deserializers = default_deserializers
serializers = default_serializers
faults = faults or {}
return resource
_NO_ARGS_MARKER = object()
| 33.630252 | 78 | 0.613193 |
e67c30a42d5e25d4e6e974aeebd81a4f702b3cd2
| 5,417 |
py
|
Python
|
akinator/utils.py
|
GitHubEmploy/akinator.py
|
67c688b0332f4caa72bacc8fbc8f95abfe2290c9
|
[
"MIT"
] | null | null | null |
akinator/utils.py
|
GitHubEmploy/akinator.py
|
67c688b0332f4caa72bacc8fbc8f95abfe2290c9
|
[
"MIT"
] | null | null | null |
akinator/utils.py
|
GitHubEmploy/akinator.py
|
67c688b0332f4caa72bacc8fbc8f95abfe2290c9
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2019 NinjaSnail1080
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from .exceptions import InvalidAnswerError, InvalidLanguageError, AkiConnectionFailure, AkiTimedOut, AkiNoQuestions, AkiServerDown, AkiTechnicalError
import re
import json
def ans_to_id(ans):
"""Convert an input answer string into an Answer ID for Akinator"""
ans = str(ans).lower()
if ans == "yes" or ans == "y" or ans == "0":
return "0"
elif ans == "no" or ans == "n" or ans == "1":
return "1"
elif ans == "i" or ans == "idk" or ans == "i dont know" or ans == "i don't know" or ans == "2":
return "2"
elif ans == "probably" or ans == "p" or ans == "3":
return "3"
elif ans == "probably not" or ans == "pn" or ans == "4":
return "4"
else:
raise InvalidAnswerError("""
You put "{}", which is an invalid answer.
The answer must be one of these:
- "yes" OR "y" OR "0" for YES
- "no" OR "n" OR "1" for NO
- "i" OR "idk" OR "i dont know" OR "i don't know" OR "2" for I DON'T KNOW
- "probably" OR "p" OR "3" for PROBABLY
- "probably not" OR "pn" OR "4" for PROBABLY NOT
""".format(ans))
def get_lang_and_theme(lang=None):
"""Returns the language code and theme based on what is input"""
if lang is None or lang == "en" or lang == "english":
return {"lang": "en", "theme": "c"}
elif lang == "en_animals" or lang == "english_animals":
return {"lang": "en", "theme": "a"}
elif lang == "en_objects" or lang == "english_objects":
return {"lang": "en", "theme": "o"}
elif lang == "ar" or lang == "arabic":
return {"lang": "ar", "theme": "c"}
elif lang == "cn" or lang == "chinese":
return {"lang": "cn", "theme": "c"}
elif lang == "de" or lang == "german":
return {"lang": "de", "theme": "c"}
elif lang == "de_animals" or lang == "german_animals":
return {"lang": "de", "theme": "a"}
elif lang == "es" or lang == "spanish":
return {"lang": "es", "theme": "c"}
elif lang == "es_animals" or lang == "spanish_animals":
return {"lang": "es", "theme": "a"}
elif lang == "fr" or lang == "french":
return {"lang": "fr", "theme": "c"}
elif lang == "fr_animals" or lang == "french_animals":
return {"lang": "fr", "theme": "a"}
elif lang == "fr_objects" or lang == "french_objects":
return {"lang": "fr", "theme": "o"}
elif lang == "il" or lang == "hebrew":
return {"lang": "il", "theme": "c"}
elif lang == "it" or lang == "italian":
return {"lang": "it", "theme": "c"}
elif lang == "it_animals" or lang == "italian_animals":
return {"lang": "it", "theme": "a"}
elif lang == "jp" or lang == "japanese":
return {"lang": "jp", "theme": "c"}
elif lang == "jp_animals" or lang == "japanese_animals":
return {"lang": "jp", "theme": "a"}
elif lang == "kr" or lang == "korean":
return {"lang": "kr", "theme": "c"}
elif lang == "nl" or lang == "dutch":
return {"lang": "nl", "theme": "c"}
elif lang == "pl" or lang == "polish":
return {"lang": "pl", "theme": "c"}
elif lang == "pt" or lang == "portuguese":
return {"lang": "pt", "theme": "c"}
elif lang == "ru" or lang == "russian":
return {"lang": "ru", "theme": "c"}
elif lang == "tr" or lang == "turkish":
return {"lang": "tr", "theme": "c"}
else:
raise InvalidLanguageError("You put \"{}\", which is an invalid language.".format(lang))
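# Examples: get_lang_and_theme() -> {"lang": "en", "theme": "c"};
# get_lang_and_theme("fr_animals") -> {"lang": "fr", "theme": "a"}.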
def raise_connection_error(response):
"""Raise the proper error if the API failed to connect"""
if response == "KO - SERVER DOWN":
raise AkiServerDown("Akinator's servers are down in this region. Try again later or use a different language")
elif response == "KO - TECHNICAL ERROR":
raise AkiTechnicalError("Akinator's servers have had a technical error. Try again later or use a different language")
elif response == "KO - TIMEOUT":
raise AkiTimedOut("Your Akinator session has timed out")
elif response == "KO - ELEM LIST IS EMPTY" or response == "WARN - NO QUESTION":
raise AkiNoQuestions("\"Akinator.step\" reached 80. No more questions")
else:
        raise AkiConnectionFailure("An unknown error has occurred. Server response: {}".format(response))
| 44.04065 | 149 | 0.606055 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.